Dataset columns (per the viewer schema): repo_id (string, length 15-86), file_path (string, length 27-180), content (string, length 1-1.75M), __index_level_0__ (int64, value 0).
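The rows that follow use exactly these four columns. As a minimal sketch of how such a dump could be iterated with the Hugging Face `datasets` library (the Parquet file name below is hypothetical, standing in for whatever export of this data is on hand):

from datasets import load_dataset

# Hypothetical local export of this dump; substitute the real path or hub dataset id.
ds = load_dataset("parquet", data_files="timm_models_dump.parquet", split="train")

for row in ds:
    # Each row carries the source repo, the file path within it, and the raw file text.
    print(row["repo_id"], row["file_path"], len(row["content"]), row["__index_level_0__"])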
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/convmixer.py
""" ConvMixer """ import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d from ._registry import register_model, generate_default_cfgs from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq __all__ = ['ConvMixer'] class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x): return self.fn(x) + x class ConvMixer(nn.Module): def __init__( self, dim, depth, kernel_size=9, patch_size=7, in_chans=3, num_classes=1000, global_pool='avg', drop_rate=0., act_layer=nn.GELU, **kwargs, ): super().__init__() self.num_classes = num_classes self.num_features = dim self.grad_checkpointing = False self.stem = nn.Sequential( nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size), act_layer(), nn.BatchNorm2d(dim) ) self.blocks = nn.Sequential( *[nn.Sequential( Residual(nn.Sequential( nn.Conv2d(dim, dim, kernel_size, groups=dim, padding="same"), act_layer(), nn.BatchNorm2d(dim) )), nn.Conv2d(dim, dim, kernel_size=1), act_layer(), nn.BatchNorm2d(dim) ) for i in range(depth)] ) self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(dim, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem=r'^stem', blocks=r'^blocks\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.pooling = SelectAdaptivePool2d(pool_type=global_pool, flatten=True) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.pooling(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_convmixer(variant, pretrained=False, **kwargs): return build_model_with_cfg(ConvMixer, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .96, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': 'head', 'first_conv': 'stem.0', **kwargs } default_cfgs = generate_default_cfgs({ 'convmixer_1536_20.in1k': _cfg(hf_hub_id='timm/'), 'convmixer_768_32.in1k': _cfg(hf_hub_id='timm/'), 'convmixer_1024_20_ks9_p14.in1k': _cfg(hf_hub_id='timm/') }) @register_model def convmixer_1536_20(pretrained=False, **kwargs) -> ConvMixer: model_args = dict(dim=1536, depth=20, kernel_size=9, patch_size=7, **kwargs) return _create_convmixer('convmixer_1536_20', pretrained, **model_args) @register_model def convmixer_768_32(pretrained=False, **kwargs) -> ConvMixer: model_args = dict(dim=768, depth=32, kernel_size=7, patch_size=7, act_layer=nn.ReLU, **kwargs) return _create_convmixer('convmixer_768_32', pretrained, **model_args) @register_model def convmixer_1024_20_ks9_p14(pretrained=False, **kwargs) -> ConvMixer: model_args = dict(dim=1024, depth=20, kernel_size=9, patch_size=14, **kwargs) 
return _create_convmixer('convmixer_1024_20_ks9_p14', pretrained, **model_args)
0
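The model entry points defined in convmixer.py above are registered with timm, so they can be created by name through `timm.create_model`. A minimal usage sketch (random weights, no download, shapes only):

import torch
import timm

# Build the 768-dim, 32-block ConvMixer variant defined above (random init).
model = timm.create_model('convmixer_768_32', pretrained=False)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)                  # classifier output
    feats = model.forward_features(x)  # pre-pooling feature map

print(logits.shape)  # torch.Size([1, 1000])
print(feats.shape)   # torch.Size([1, 768, 32, 32]) -- patch stride 7 on a 224x224 input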
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/convnext.py
""" ConvNeXt Papers: * `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf @Article{liu2022convnet, author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, title = {A ConvNet for the 2020s}, journal = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, year = {2022}, } * `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808 @article{Woo2023ConvNeXtV2, title={ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders}, author={Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon and Saining Xie}, year={2023}, journal={arXiv preprint arXiv:2301.00808}, } Original code and weights from: * https://github.com/facebookresearch/ConvNeXt, original copyright below * https://github.com/facebookresearch/ConvNeXt-V2, original copyright below Model defs atto, femto, pico, nano and _ols / _hnf variants are timm originals. Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman """ # ConvNeXt # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the MIT license # ConvNeXt-V2 # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree (Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)) # No code was used directly from ConvNeXt-V2, however the weights are CC BY-NC 4.0 so beware if using commercially. from collections import OrderedDict from functools import partial from typing import Callable, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import trunc_normal_, AvgPool2dSame, DropPath, Mlp, GlobalResponseNormMlp, \ LayerNorm2d, LayerNorm, create_conv2d, get_act_layer, make_divisible, to_ntuple from timm.layers import NormMlpClassifierHead, ClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['ConvNeXt'] # model_registry will add each entrypoint fn to this class Downsample(nn.Module): def __init__(self, in_chs, out_chs, stride=1, dilation=1): super().__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() if in_chs != out_chs: self.conv = create_conv2d(in_chs, out_chs, 1, stride=1) else: self.conv = nn.Identity() def forward(self, x): x = self.pool(x) x = self.conv(x) return x class ConvNeXtBlock(nn.Module): """ ConvNeXt Block There are two equivalent implementations: (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear is a better choice. 
This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW. """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 7, stride: int = 1, dilation: Union[int, Tuple[int, int]] = (1, 1), mlp_ratio: float = 4, conv_mlp: bool = False, conv_bias: bool = True, use_grn: bool = False, ls_init_value: Optional[float] = 1e-6, act_layer: Union[str, Callable] = 'gelu', norm_layer: Optional[Callable] = None, drop_path: float = 0., ): """ Args: in_chs: Block input channels. out_chs: Block output channels (same as in_chs if None). kernel_size: Depthwise convolution kernel size. stride: Stride of depthwise convolution. dilation: Tuple specifying input and output dilation of block. mlp_ratio: MLP expansion ratio. conv_mlp: Use 1x1 convolutions for MLP and a NCHW compatible norm layer if True. conv_bias: Apply bias for all convolution (linear) layers. use_grn: Use GlobalResponseNorm in MLP (from ConvNeXt-V2) ls_init_value: Layer-scale init values, layer-scale applied if not None. act_layer: Activation layer. norm_layer: Normalization layer (defaults to LN if not specified). drop_path: Stochastic depth probability. """ super().__init__() out_chs = out_chs or in_chs dilation = to_ntuple(2)(dilation) act_layer = get_act_layer(act_layer) if not norm_layer: norm_layer = LayerNorm2d if conv_mlp else LayerNorm mlp_layer = partial(GlobalResponseNormMlp if use_grn else Mlp, use_conv=conv_mlp) self.use_conv_mlp = conv_mlp self.conv_dw = create_conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride, dilation=dilation[0], depthwise=True, bias=conv_bias, ) self.norm = norm_layer(out_chs) self.mlp = mlp_layer(out_chs, int(mlp_ratio * out_chs), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(out_chs)) if ls_init_value is not None else None if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: self.shortcut = Downsample(in_chs, out_chs, stride=stride, dilation=dilation[0]) else: self.shortcut = nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x x = self.conv_dw(x) if self.use_conv_mlp: x = self.norm(x) x = self.mlp(x) else: x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.mlp(x) x = x.permute(0, 3, 1, 2) if self.gamma is not None: x = x.mul(self.gamma.reshape(1, -1, 1, 1)) x = self.drop_path(x) + self.shortcut(shortcut) return x class ConvNeXtStage(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=7, stride=2, depth=2, dilation=(1, 1), drop_path_rates=None, ls_init_value=1.0, conv_mlp=False, conv_bias=True, use_grn=False, act_layer='gelu', norm_layer=None, norm_layer_cl=None ): super().__init__() self.grad_checkpointing = False if in_chs != out_chs or stride > 1 or dilation[0] != dilation[1]: ds_ks = 2 if stride > 1 or dilation[0] != dilation[1] else 1 pad = 'same' if dilation[1] > 1 else 0 # same padding needed if dilation used self.downsample = nn.Sequential( norm_layer(in_chs), create_conv2d( in_chs, out_chs, kernel_size=ds_ks, stride=stride, dilation=dilation[0], padding=pad, bias=conv_bias, ), ) in_chs = out_chs else: self.downsample = nn.Identity() drop_path_rates = drop_path_rates or [0.] 
* depth stage_blocks = [] for i in range(depth): stage_blocks.append(ConvNeXtBlock( in_chs=in_chs, out_chs=out_chs, kernel_size=kernel_size, dilation=dilation[1], drop_path=drop_path_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp, conv_bias=conv_bias, use_grn=use_grn, act_layer=act_layer, norm_layer=norm_layer if conv_mlp else norm_layer_cl, )) in_chs = out_chs self.blocks = nn.Sequential(*stage_blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class ConvNeXt(nn.Module): r""" ConvNeXt A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf """ def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', output_stride: int = 32, depths: Tuple[int, ...] = (3, 3, 9, 3), dims: Tuple[int, ...] = (96, 192, 384, 768), kernel_sizes: Union[int, Tuple[int, ...]] = 7, ls_init_value: Optional[float] = 1e-6, stem_type: str = 'patch', patch_size: int = 4, head_init_scale: float = 1., head_norm_first: bool = False, head_hidden_size: Optional[int] = None, conv_mlp: bool = False, conv_bias: bool = True, use_grn: bool = False, act_layer: Union[str, Callable] = 'gelu', norm_layer: Optional[Union[str, Callable]] = None, norm_eps: Optional[float] = None, drop_rate: float = 0., drop_path_rate: float = 0., ): """ Args: in_chans: Number of input image channels. num_classes: Number of classes for classification head. global_pool: Global pooling type. output_stride: Output stride of network, one of (8, 16, 32). depths: Number of blocks at each stage. dims: Feature dimension at each stage. kernel_sizes: Depthwise convolution kernel-sizes for each stage. ls_init_value: Init value for Layer Scale, disabled if None. stem_type: Type of stem. patch_size: Stem patch size for patch stem. head_init_scale: Init scaling value for classifier weights and biases. head_norm_first: Apply normalization before global pool + head. head_hidden_size: Size of MLP hidden layer in head if not None and head_norm_first == False. conv_mlp: Use 1x1 conv in MLP, improves speed for small networks w/ chan last. conv_bias: Use bias layers w/ all convolutions. use_grn: Use Global Response Norm (ConvNeXt-V2) in MLP. act_layer: Activation layer type. norm_layer: Normalization layer type. drop_rate: Head pre-classifier dropout rate. drop_path_rate: Stochastic depth drop rate. 
""" super().__init__() assert output_stride in (8, 16, 32) kernel_sizes = to_ntuple(4)(kernel_sizes) if norm_layer is None: norm_layer = LayerNorm2d norm_layer_cl = norm_layer if conv_mlp else LayerNorm if norm_eps is not None: norm_layer = partial(norm_layer, eps=norm_eps) norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) else: assert conv_mlp,\ 'If a norm_layer is specified, conv MLP must be used so all norm expect rank-4, channels-first input' norm_layer_cl = norm_layer if norm_eps is not None: norm_layer_cl = partial(norm_layer_cl, eps=norm_eps) self.num_classes = num_classes self.drop_rate = drop_rate self.feature_info = [] assert stem_type in ('patch', 'overlap', 'overlap_tiered') if stem_type == 'patch': # NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4 self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size, bias=conv_bias), norm_layer(dims[0]), ) stem_stride = patch_size else: mid_chs = make_divisible(dims[0] // 2) if 'tiered' in stem_type else dims[0] self.stem = nn.Sequential( nn.Conv2d(in_chans, mid_chs, kernel_size=3, stride=2, padding=1, bias=conv_bias), nn.Conv2d(mid_chs, dims[0], kernel_size=3, stride=2, padding=1, bias=conv_bias), norm_layer(dims[0]), ) stem_stride = 4 self.stages = nn.Sequential() dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] prev_chs = dims[0] curr_stride = stem_stride dilation = 1 # 4 feature resolution stages, each consisting of multiple residual blocks for i in range(4): stride = 2 if curr_stride == 2 or i > 0 else 1 if curr_stride >= output_stride and stride > 1: dilation *= stride stride = 1 curr_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 out_chs = dims[i] stages.append(ConvNeXtStage( prev_chs, out_chs, kernel_size=kernel_sizes[i], stride=stride, dilation=(first_dilation, dilation), depth=depths[i], drop_path_rates=dp_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp, conv_bias=conv_bias, use_grn=use_grn, act_layer=act_layer, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, )) prev_chs = out_chs # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = prev_chs # if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets # otherwise pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights) if head_norm_first: assert not head_hidden_size self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer, act_layer='gelu', ) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.downsample', (0,)), # blocks (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm_pre', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes=0, 
global_pool=None): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_(module.weight, std=.02) nn.init.zeros_(module.bias) if name and 'head.' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): """ Remap FB checkpoints -> timm """ if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: return state_dict # non-FB checkpoint if 'model' in state_dict: state_dict = state_dict['model'] out_dict = {} if 'visual.trunk.stem.0.weight' in state_dict: out_dict = {k.replace('visual.trunk.', ''): v for k, v in state_dict.items() if k.startswith('visual.trunk.')} if 'visual.head.proj.weight' in state_dict: out_dict['head.fc.weight'] = state_dict['visual.head.proj.weight'] out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.proj.weight'].shape[0]) elif 'visual.head.mlp.fc1.weight' in state_dict: out_dict['head.pre_logits.fc.weight'] = state_dict['visual.head.mlp.fc1.weight'] out_dict['head.pre_logits.fc.bias'] = state_dict['visual.head.mlp.fc1.bias'] out_dict['head.fc.weight'] = state_dict['visual.head.mlp.fc2.weight'] out_dict['head.fc.bias'] = torch.zeros(state_dict['visual.head.mlp.fc2.weight'].shape[0]) return out_dict import re for k, v in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) k = k.replace('dwconv', 'conv_dw') k = k.replace('pwconv', 'mlp.fc') if 'grn' in k: k = k.replace('grn.beta', 'mlp.grn.bias') k = k.replace('grn.gamma', 'mlp.grn.weight') v = v.reshape(v.shape[-1]) k = k.replace('head.', 'head.fc.') if k.startswith('norm.'): k = k.replace('norm', 'head.norm') if v.ndim == 2 and 'head' not in k: model_shape = model.state_dict()[k].shape v = v.reshape(model_shape) out_dict[k] = v return out_dict def _create_convnext(variant, pretrained=False, **kwargs): if kwargs.get('pretrained_cfg', '') == 'fcmae': # NOTE fcmae pretrained weights have no classifier or final norm-layer (`head.norm`) # This is workaround loading with num_classes=0 w/o removing norm-layer. 
kwargs.setdefault('pretrained_strict', False) model = build_model_with_cfg( ConvNeXt, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', **kwargs } def _cfgv2(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', 'license': 'cc-by-nc-4.0', 'paper_ids': 'arXiv:2301.00808', 'paper_name': 'ConvNeXt-V2: Co-designing and Scaling ConvNets with Masked Autoencoders', 'origin_url': 'https://github.com/facebookresearch/ConvNeXt-V2', **kwargs } default_cfgs = generate_default_cfgs({ # timm specific variants 'convnext_tiny.in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_atto.d2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_d2-01bb0f51.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_atto_ols.a2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_atto_ols_a2-78d1c8f3.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_femto.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_d1-d71d5b4c.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_femto_ols.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_femto_ols_d1-246bf2ed.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_pico.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_d1-10ad7f0d.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnext_pico_ols.d1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_pico_ols_d1-611f0ca7.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano.in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano.d1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_d1h-7eb4bdea.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_nano_ols.d1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_nano_ols_d1h-ae424a9a.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny_hnf.a2h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/convnext_tiny_hnf_a2h-ab7e9df2.pth', hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 
288, 288), test_crop_pct=1.0), 'convnext_tiny.in12k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_small.in12k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_nano.in12k': _cfg( hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_tiny.in12k': _cfg( hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_small.in12k': _cfg( hf_hub_id='timm/', crop_pct=0.95, num_classes=11821), 'convnext_tiny.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_base.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_large.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_xlarge.fb_in22k_ft_in1k': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_small.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_base.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_large.fb_in1k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnext_tiny.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_small.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_base.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_xlarge.fb_in22k_ft_in1k_384': _cfg( url='https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_tiny.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_small.fb_in22k': _cfg( 
url="https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_base.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_large.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnext_xlarge.fb_in22k': _cfg( url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", hf_hub_id='timm/', num_classes=21841), 'convnextv2_nano.fcmae_ft_in22k_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_nano.fcmae_ft_in22k_in1k_384': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt', hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_tiny.fcmae_ft_in22k_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_tiny.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_base.fcmae_ft_in22k_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_base.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_large.fcmae_ft_in22k_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_large.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_huge.fcmae_ft_in22k_in1k_384': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt", hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnextv2_huge.fcmae_ft_in22k_in1k_512': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt", hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(15, 15), crop_pct=1.0, crop_mode='squash'), 'convnextv2_atto.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_femto.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_pico.fcmae_ft_in1k': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'convnextv2_nano.fcmae_ft_in1k': _cfgv2( 
url='https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt', hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_tiny.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_base.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_large.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_huge.fcmae_ft_in1k': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt", hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'convnextv2_atto.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_atto_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_femto.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_femto_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_pico.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_pico_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_nano.fcmae': _cfgv2( url='https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_nano_1k_224_fcmae.pt', hf_hub_id='timm/', num_classes=0), 'convnextv2_tiny.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_tiny_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_base.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_base_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_large.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_large_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_huge.fcmae': _cfgv2( url="https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_huge_1k_224_fcmae.pt", hf_hub_id='timm/', num_classes=0), 'convnextv2_small.untrained': _cfg(), # CLIP weights, fine-tuned on in1k or in12k + in1k 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laion2b_augreg_ft_in12k_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_base.clip_laion2b_augreg_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laiona_augreg_ft_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 
'convnext_large_mlp.clip_laion2b_augreg_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0 ), 'convnext_large_mlp.clip_laion2b_augreg_ft_in1k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash' ), 'convnext_xxlarge.clip_laion2b_soup_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_base.clip_laion2b_augreg_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_320': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0), 'convnext_large_mlp.clip_laion2b_augreg_ft_in12k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'convnext_large_mlp.clip_laion2b_soup_ft_in12k_384': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821, input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), # CLIP original image tower weights 'convnext_base.clip_laion2b': _cfg( hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laion2b_augreg': _cfg( hf_hub_id='laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona': _cfg( hf_hub_id='laion/CLIP-convnext_base_w-laion_aesthetic-s13B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona_320': _cfg( hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), 'convnext_base.clip_laiona_augreg_320': _cfg( hf_hub_id='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K-augreg', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=640), 'convnext_large_mlp.clip_laion2b_augreg': _cfg( hf_hub_id='laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=768), 'convnext_large_mlp.clip_laion2b_ft_320': _cfg( hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768), 'convnext_large_mlp.clip_laion2b_ft_soup_320': _cfg( hf_hub_id='laion/CLIP-convnext_large_d_320.laion2B-s29B-b131K-ft-soup', hf_hub_filename='open_clip_pytorch_model.bin', 
mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, num_classes=768), 'convnext_xxlarge.clip_laion2b_soup': _cfg( hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-soup', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024), 'convnext_xxlarge.clip_laion2b_rewind': _cfg( hf_hub_id='laion/CLIP-convnext_xxlarge-laion2B-s34B-b82K-augreg-rewind', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, num_classes=1024), }) @register_model def convnext_atto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True) model = _create_convnext('convnext_atto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_atto_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant with overlapping 3x3 conv stem, wider than non-ols femto above, current param count 3.7M model_args = dict(depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_atto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_femto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True) model = _create_convnext('convnext_femto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_femto_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant model_args = dict(depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_femto_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_pico(pretrained=False, **kwargs) -> ConvNeXt: # timm pico variant model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True) model = _create_convnext('convnext_pico', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_pico_ols(pretrained=False, **kwargs) -> ConvNeXt: # timm nano variant with overlapping 3x3 conv stem model_args = dict(depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), conv_mlp=True, stem_type='overlap_tiered') model = _create_convnext('convnext_pico_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_nano(pretrained=False, **kwargs) -> ConvNeXt: # timm nano variant with standard stem and head model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True) model = _create_convnext('convnext_nano', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_nano_ols(pretrained=False, **kwargs) -> ConvNeXt: # experimental nano variant with overlapping conv stem model_args = dict(depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), conv_mlp=True, stem_type='overlap') model = _create_convnext('convnext_nano_ols', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_tiny_hnf(pretrained=False, **kwargs) -> ConvNeXt: # experimental tiny variant with norm before pooling in head (head norm first) model_args = dict(depths=(3, 3, 9, 
3), dims=(96, 192, 384, 768), head_norm_first=True, conv_mlp=True) model = _create_convnext('convnext_tiny_hnf', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_tiny(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768)) model = _create_convnext('convnext_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_small(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768]) model = _create_convnext('convnext_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_base(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024]) model = _create_convnext('convnext_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_large(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536]) model = _create_convnext('convnext_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_large_mlp(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], head_hidden_size=1536) model = _create_convnext('convnext_large_mlp', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_xlarge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048]) model = _create_convnext('convnext_xlarge', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnext_xxlarge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 4, 30, 3], dims=[384, 768, 1536, 3072], norm_eps=kwargs.pop('norm_eps', 1e-5)) model = _create_convnext('convnext_xxlarge', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_atto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant (NOTE: still tweaking depths, will vary between 3-4M param, current is 3.7M model_args = dict( depths=(2, 2, 6, 2), dims=(40, 80, 160, 320), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_atto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_femto(pretrained=False, **kwargs) -> ConvNeXt: # timm femto variant model_args = dict( depths=(2, 2, 6, 2), dims=(48, 96, 192, 384), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_femto', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_pico(pretrained=False, **kwargs) -> ConvNeXt: # timm pico variant model_args = dict( depths=(2, 2, 6, 2), dims=(64, 128, 256, 512), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_pico', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_nano(pretrained=False, **kwargs) -> ConvNeXt: # timm nano variant with standard stem and head model_args = dict( depths=(2, 2, 8, 2), dims=(80, 160, 320, 640), use_grn=True, ls_init_value=None, conv_mlp=True) model = _create_convnext('convnextv2_nano', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_tiny(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), 
use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_tiny', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_small(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_base(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_large(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def convnextv2_huge(pretrained=False, **kwargs) -> ConvNeXt: model_args = dict(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], use_grn=True, ls_init_value=None) model = _create_convnext('convnextv2_huge', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'convnext_tiny_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k', 'convnext_small_in22ft1k': 'convnext_small.fb_in22k_ft_in1k', 'convnext_base_in22ft1k': 'convnext_base.fb_in22k_ft_in1k', 'convnext_large_in22ft1k': 'convnext_large.fb_in22k_ft_in1k', 'convnext_xlarge_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k', 'convnext_tiny_384_in22ft1k': 'convnext_tiny.fb_in22k_ft_in1k_384', 'convnext_small_384_in22ft1k': 'convnext_small.fb_in22k_ft_in1k_384', 'convnext_base_384_in22ft1k': 'convnext_base.fb_in22k_ft_in1k_384', 'convnext_large_384_in22ft1k': 'convnext_large.fb_in22k_ft_in1k_384', 'convnext_xlarge_384_in22ft1k': 'convnext_xlarge.fb_in22k_ft_in1k_384', 'convnext_tiny_in22k': 'convnext_tiny.fb_in22k', 'convnext_small_in22k': 'convnext_small.fb_in22k', 'convnext_base_in22k': 'convnext_base.fb_in22k', 'convnext_large_in22k': 'convnext_large.fb_in22k', 'convnext_xlarge_in22k': 'convnext_xlarge.fb_in22k', })
0
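Like ConvMixer, the ConvNeXt variants in convnext.py above are registered timm entry points, and `_create_convnext` passes a `feature_cfg` with `out_indices=(0, 1, 2, 3)`, so the models also work as feature-pyramid backbones. A minimal sketch (random weights, shapes only):

import torch
import timm

x = torch.randn(1, 3, 224, 224)

# Standard classifier: stem -> 4 stages -> norm_pre -> head.
model = timm.create_model('convnext_tiny', pretrained=False)
print(model(x).shape)  # torch.Size([1, 1000])

# Feature extractor exposing the four stage outputs.
backbone = timm.create_model('convnext_tiny', pretrained=False, features_only=True)
for f in backbone(x):
    print(f.shape)  # channels 96/192/384/768 at strides 4/8/16/32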
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/crossvit.py
""" CrossViT Model @inproceedings{ chen2021crossvit, title={{CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification}}, author={Chun-Fu (Richard) Chen and Quanfu Fan and Rameswar Panda}, booktitle={International Conference on Computer Vision (ICCV)}, year={2021} } Paper link: https://arxiv.org/abs/2103.14899 Original code: https://github.com/IBM/CrossViT/blob/main/models/crossvit.py NOTE: model names have been renamed from originals to represent actual input res all *_224 -> *_240 and *_384 -> *_408 Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # Copyright IBM All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ Modifed from Timm. https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py """ from functools import partial from typing import List from typing import Tuple import torch import torch.hub import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, to_2tuple, trunc_normal_, _assert from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._registry import register_model, generate_default_cfgs from .vision_transformer import Block __all__ = ['CrossVit'] # model_registry will add each entrypoint fn to this class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, multi_conv=False): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches if multi_conv: if patch_size[0] == 12: self.proj = nn.Sequential( nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), nn.ReLU(inplace=True), nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=3, padding=0), nn.ReLU(inplace=True), nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1), ) elif patch_size[0] == 16: self.proj = nn.Sequential( nn.Conv2d(in_chans, embed_dim // 4, kernel_size=7, stride=4, padding=3), nn.ReLU(inplace=True), nn.Conv2d(embed_dim // 4, embed_dim // 2, kernel_size=3, stride=2, padding=1), nn.ReLU(inplace=True), nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1), ) else: self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): B, C, H, W = x.shape # FIXME look at relaxing size constraints _assert(H == self.img_size[0], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") _assert(W == self.img_size[1], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") x = self.proj(x).flatten(2).transpose(1, 2) return x class CrossAttention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights self.scale = head_dim ** -0.5 self.wq = nn.Linear(dim, dim, bias=qkv_bias) self.wk = nn.Linear(dim, dim, bias=qkv_bias) self.wv = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape # B1C -> B1H(C/H) -> BH1(C/H) q = 
self.wq(x[:, 0:1, ...]).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) # BNC -> BNH(C/H) -> BHN(C/H) k = self.wk(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) # BNC -> BNH(C/H) -> BHN(C/H) v = self.wv(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) attn = (q @ k.transpose(-2, -1)) * self.scale # BH1(C/H) @ BH(C/H)N -> BH1N attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, 1, C) # (BH1N @ BHN(C/H)) -> BH1(C/H) -> B1H(C/H) -> B1C x = self.proj(x) x = self.proj_drop(x) return x class CrossAttentionBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = CrossAttention( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): x = x[:, 0:1, ...] + self.drop_path(self.attn(self.norm1(x))) return x class MultiScaleBlock(nn.Module): def __init__( self, dim, patches, depth, num_heads, mlp_ratio, qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() num_branches = len(dim) self.num_branches = num_branches # different branch could have different embedding size, the first one is the base self.blocks = nn.ModuleList() for d in range(num_branches): tmp = [] for i in range(depth[d]): tmp.append(Block( dim=dim[d], num_heads=num_heads[d], mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer, )) if len(tmp) != 0: self.blocks.append(nn.Sequential(*tmp)) if len(self.blocks) == 0: self.blocks = None self.projs = nn.ModuleList() for d in range(num_branches): if dim[d] == dim[(d + 1) % num_branches] and False: tmp = [nn.Identity()] else: tmp = [norm_layer(dim[d]), act_layer(), nn.Linear(dim[d], dim[(d + 1) % num_branches])] self.projs.append(nn.Sequential(*tmp)) self.fusion = nn.ModuleList() for d in range(num_branches): d_ = (d + 1) % num_branches nh = num_heads[d_] if depth[-1] == 0: # backward capability: self.fusion.append( CrossAttentionBlock( dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer, )) else: tmp = [] for _ in range(depth[-1]): tmp.append(CrossAttentionBlock( dim=dim[d_], num_heads=nh, mlp_ratio=mlp_ratio[d], qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[-1], norm_layer=norm_layer, )) self.fusion.append(nn.Sequential(*tmp)) self.revert_projs = nn.ModuleList() for d in range(num_branches): if dim[(d + 1) % num_branches] == dim[d] and False: tmp = [nn.Identity()] else: tmp = [norm_layer(dim[(d + 1) % num_branches]), act_layer(), nn.Linear(dim[(d + 1) % num_branches], dim[d])] self.revert_projs.append(nn.Sequential(*tmp)) def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: outs_b = [] for i, block in enumerate(self.blocks): outs_b.append(block(x[i])) # only take the cls token out proj_cls_token = torch.jit.annotate(List[torch.Tensor], []) for i, proj in enumerate(self.projs): proj_cls_token.append(proj(outs_b[i][:, 0:1, ...])) # cross attention outs = [] for i, (fusion, revert_proj) in 
enumerate(zip(self.fusion, self.revert_projs)): tmp = torch.cat((proj_cls_token[i], outs_b[(i + 1) % self.num_branches][:, 1:, ...]), dim=1) tmp = fusion(tmp) reverted_proj_cls_token = revert_proj(tmp[:, 0:1, ...]) tmp = torch.cat((reverted_proj_cls_token, outs_b[i][:, 1:, ...]), dim=1) outs.append(tmp) return outs def _compute_num_patches(img_size, patches): return [i[0] // p * i[1] // p for i, p in zip(img_size, patches)] @register_notrace_function def scale_image(x, ss: Tuple[int, int], crop_scale: bool = False): # annotations for torchscript """ Pulled out of CrossViT.forward_features to bury conditional logic in a leaf node for FX tracing. Args: x (Tensor): input image ss (tuple[int, int]): height and width to scale to crop_scale (bool): whether to crop instead of interpolate to achieve the desired scale. Defaults to False Returns: Tensor: the "scaled" image batch tensor """ H, W = x.shape[-2:] if H != ss[0] or W != ss[1]: if crop_scale and ss[0] <= H and ss[1] <= W: cu, cl = int(round((H - ss[0]) / 2.)), int(round((W - ss[1]) / 2.)) x = x[:, :, cu:cu + ss[0], cl:cl + ss[1]] else: x = torch.nn.functional.interpolate(x, size=ss, mode='bicubic', align_corners=False) return x class CrossVit(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__( self, img_size=224, img_scale=(1.0, 1.0), patch_size=(8, 16), in_chans=3, num_classes=1000, embed_dim=(192, 384), depth=((1, 3, 1), (1, 3, 1), (1, 3, 1)), num_heads=(6, 12), mlp_ratio=(2., 2., 4.), multi_conv=False, crop_scale=False, qkv_bias=True, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), global_pool='token', ): super().__init__() assert global_pool in ('token', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.img_size = to_2tuple(img_size) img_scale = to_2tuple(img_scale) self.img_size_scaled = [tuple([int(sj * si) for sj in self.img_size]) for si in img_scale] self.crop_scale = crop_scale # crop instead of interpolate for scale num_patches = _compute_num_patches(self.img_size_scaled, patch_size) self.num_branches = len(patch_size) self.embed_dim = embed_dim self.num_features = sum(embed_dim) self.patch_embed = nn.ModuleList() # hard-coded for torch jit script for i in range(self.num_branches): setattr(self, f'pos_embed_{i}', nn.Parameter(torch.zeros(1, 1 + num_patches[i], embed_dim[i]))) setattr(self, f'cls_token_{i}', nn.Parameter(torch.zeros(1, 1, embed_dim[i]))) for im_s, p, d in zip(self.img_size_scaled, patch_size, embed_dim): self.patch_embed.append( PatchEmbed( img_size=im_s, patch_size=p, in_chans=in_chans, embed_dim=d, multi_conv=multi_conv, )) self.pos_drop = nn.Dropout(p=pos_drop_rate) total_depth = sum([sum(x[-2:]) for x in depth]) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)] # stochastic depth decay rule dpr_ptr = 0 self.blocks = nn.ModuleList() for idx, block_cfg in enumerate(depth): curr_depth = max(block_cfg[:-1]) + block_cfg[-1] dpr_ = dpr[dpr_ptr:dpr_ptr + curr_depth] blk = MultiScaleBlock( embed_dim, num_patches, block_cfg, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr_, norm_layer=norm_layer, ) dpr_ptr += curr_depth self.blocks.append(blk) self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)]) self.head_drop = nn.Dropout(drop_rate) self.head = nn.ModuleList([ nn.Linear(embed_dim[i], num_classes) if num_classes > 0 else 
nn.Identity() for i in range(self.num_branches)]) for i in range(self.num_branches): trunc_normal_(getattr(self, f'pos_embed_{i}'), std=.02) trunc_normal_(getattr(self, f'cls_token_{i}'), std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): out = set() for i in range(self.num_branches): out.add(f'cls_token_{i}') pe = getattr(self, f'pos_embed_{i}', None) if pe is not None and pe.requires_grad: out.add(f'pos_embed_{i}') return out @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('token', 'avg') self.global_pool = global_pool self.head = nn.ModuleList( [nn.Linear(self.embed_dim[i], num_classes) if num_classes > 0 else nn.Identity() for i in range(self.num_branches)]) def forward_features(self, x) -> List[torch.Tensor]: B = x.shape[0] xs = [] for i, patch_embed in enumerate(self.patch_embed): x_ = x ss = self.img_size_scaled[i] x_ = scale_image(x_, ss, self.crop_scale) x_ = patch_embed(x_) cls_tokens = self.cls_token_0 if i == 0 else self.cls_token_1 # hard-coded for torch jit script cls_tokens = cls_tokens.expand(B, -1, -1) x_ = torch.cat((cls_tokens, x_), dim=1) pos_embed = self.pos_embed_0 if i == 0 else self.pos_embed_1 # hard-coded for torch jit script x_ = x_ + pos_embed x_ = self.pos_drop(x_) xs.append(x_) for i, blk in enumerate(self.blocks): xs = blk(xs) # NOTE: was before branch token section, move to here to assure all branch token are before layer norm xs = [norm(xs[i]) for i, norm in enumerate(self.norm)] return xs def forward_head(self, xs: List[torch.Tensor], pre_logits: bool = False) -> torch.Tensor: xs = [x[:, 1:].mean(dim=1) for x in xs] if self.global_pool == 'avg' else [x[:, 0] for x in xs] xs = [self.head_drop(x) for x in xs] if pre_logits or isinstance(self.head[0], nn.Identity): return torch.cat([x for x in xs], dim=1) return torch.mean(torch.stack([head(xs[i]) for i, head in enumerate(self.head)], dim=0), dim=0) def forward(self, x): xs = self.forward_features(x) x = self.forward_head(xs) return x def _create_crossvit(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') def pretrained_filter_fn(state_dict): new_state_dict = {} for key in state_dict.keys(): if 'pos_embed' in key or 'cls_token' in key: new_key = key.replace(".", "_") else: new_key = key new_state_dict[new_key] = state_dict[key] return new_state_dict return build_model_with_cfg( CrossVit, variant, pretrained, pretrained_filter_fn=pretrained_filter_fn, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 240, 240), 'pool_size': None, 'crop_pct': 0.875, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'fixed_input_size': True, 'first_conv': ('patch_embed.0.proj', 'patch_embed.1.proj'), 
'classifier': ('head.0', 'head.1'), **kwargs } default_cfgs = generate_default_cfgs({ 'crossvit_15_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_15_dagger_240.in1k': _cfg( hf_hub_id='timm/', first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), ), 'crossvit_15_dagger_408.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, ), 'crossvit_18_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_18_dagger_240.in1k': _cfg( hf_hub_id='timm/', first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), ), 'crossvit_18_dagger_408.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 408, 408), first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), crop_pct=1.0, ), 'crossvit_9_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_9_dagger_240.in1k': _cfg( hf_hub_id='timm/', first_conv=('patch_embed.0.proj.0', 'patch_embed.1.proj.0'), ), 'crossvit_base_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_small_240.in1k': _cfg(hf_hub_id='timm/'), 'crossvit_tiny_240.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def crossvit_tiny_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[96, 192], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], num_heads=[3, 3], mlp_ratio=[4, 4, 1]) model = _create_crossvit(variant='crossvit_tiny_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_small_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], num_heads=[6, 6], mlp_ratio=[4, 4, 1]) model = _create_crossvit(variant='crossvit_small_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_base_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[384, 768], depth=[[1, 4, 0], [1, 4, 0], [1, 4, 0]], num_heads=[12, 12], mlp_ratio=[4, 4, 1]) model = _create_crossvit(variant='crossvit_base_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_9_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], num_heads=[4, 4], mlp_ratio=[3, 3, 1]) model = _create_crossvit(variant='crossvit_9_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_15_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], num_heads=[6, 6], mlp_ratio=[3, 3, 1]) model = _create_crossvit(variant='crossvit_15_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_18_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], num_heads=[7, 7], mlp_ratio=[3, 3, 1], **kwargs) model = _create_crossvit(variant='crossvit_18_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_9_dagger_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 224 / 240), patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]], num_heads=[4, 4], mlp_ratio=[3, 3, 1], multi_conv=True) model = 
_create_crossvit(variant='crossvit_9_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_15_dagger_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True) model = _create_crossvit(variant='crossvit_15_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_15_dagger_408(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]], num_heads=[6, 6], mlp_ratio=[3, 3, 1], multi_conv=True) model = _create_crossvit(variant='crossvit_15_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_18_dagger_240(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 224/240), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True) model = _create_crossvit(variant='crossvit_18_dagger_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def crossvit_18_dagger_408(pretrained=False, **kwargs) -> CrossVit: model_args = dict( img_scale=(1.0, 384/408), patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]], num_heads=[7, 7], mlp_ratio=[3, 3, 1], multi_conv=True) model = _create_crossvit(variant='crossvit_18_dagger_408', pretrained=pretrained, **dict(model_args, **kwargs)) return model
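# Minimal usage sketch for the CrossViT entrypoints registered above, assuming
# `timm` and `torch` are available in the environment. The 240x240 input size and
# the per-branch token shapes follow from the default_cfgs / model_args above;
# pretrained=False avoids any weight download.
import torch
import timm

crossvit = timm.create_model('crossvit_tiny_240', pretrained=False, num_classes=10)
crossvit.eval()

dummy = torch.randn(1, 3, 240, 240)                    # configs above use a fixed input size
with torch.no_grad():
    logits = crossvit(dummy)                           # per-branch head outputs are averaged
    branch_tokens = crossvit.forward_features(dummy)   # List[Tensor], one per branch

print(logits.shape)                                    # torch.Size([1, 10])
print([t.shape for t in branch_tokens])                # (B, 1 + num_patches_i, embed_dim_i) per branch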
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/cspnet.py
"""PyTorch CspNet A PyTorch implementation of Cross Stage Partial Networks including: * CSPResNet50 * CSPResNeXt50 * CSPDarkNet53 * and DarkNet53 for good measure Based on paper `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 Reference impl via darknet cfg files at https://github.com/WongKinYiu/CrossStagePartialNetworks Hacked together by / Copyright 2020 Ross Wightman """ from dataclasses import dataclass, asdict, replace from functools import partial from typing import Any, Dict, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, ConvNormAct, ConvNormActAa, DropPath, get_attn, create_act_layer, make_divisible from ._builder import build_model_with_cfg from ._manipulate import named_apply, MATCH_PREV_GROUP from ._registry import register_model, generate_default_cfgs __all__ = ['CspNet'] # model_registry will add each entrypoint fn to this @dataclass class CspStemCfg: out_chs: Union[int, Tuple[int, ...]] = 32 stride: Union[int, Tuple[int, ...]] = 2 kernel_size: int = 3 padding: Union[int, str] = '' pool: Optional[str] = '' def _pad_arg(x, n): # pads an argument tuple to specified n by padding with last value if not isinstance(x, (tuple, list)): x = (x,) curr_n = len(x) pad_n = n - curr_n if pad_n <= 0: return x[:n] return tuple(x + (x[-1],) * pad_n) @dataclass class CspStagesCfg: depth: Tuple[int, ...] = (3, 3, 5, 2) # block depth (number of block repeats in stages) out_chs: Tuple[int, ...] = (128, 256, 512, 1024) # number of output channels for blocks in stage stride: Union[int, Tuple[int, ...]] = 2 # stride of stage groups: Union[int, Tuple[int, ...]] = 1 # num kxk conv groups block_ratio: Union[float, Tuple[float, ...]] = 1.0 bottle_ratio: Union[float, Tuple[float, ...]] = 1. 
# bottleneck-ratio of blocks in stage avg_down: Union[bool, Tuple[bool, ...]] = False attn_layer: Optional[Union[str, Tuple[str, ...]]] = None attn_kwargs: Optional[Union[Dict, Tuple[Dict]]] = None stage_type: Union[str, Tuple[str]] = 'csp' # stage type ('csp', 'cs2', 'dark') block_type: Union[str, Tuple[str]] = 'bottle' # blocks type for stages ('bottle', 'dark') # cross-stage only expand_ratio: Union[float, Tuple[float, ...]] = 1.0 cross_linear: Union[bool, Tuple[bool, ...]] = False down_growth: Union[bool, Tuple[bool, ...]] = False def __post_init__(self): n = len(self.depth) assert len(self.out_chs) == n self.stride = _pad_arg(self.stride, n) self.groups = _pad_arg(self.groups, n) self.block_ratio = _pad_arg(self.block_ratio, n) self.bottle_ratio = _pad_arg(self.bottle_ratio, n) self.avg_down = _pad_arg(self.avg_down, n) self.attn_layer = _pad_arg(self.attn_layer, n) self.attn_kwargs = _pad_arg(self.attn_kwargs, n) self.stage_type = _pad_arg(self.stage_type, n) self.block_type = _pad_arg(self.block_type, n) self.expand_ratio = _pad_arg(self.expand_ratio, n) self.cross_linear = _pad_arg(self.cross_linear, n) self.down_growth = _pad_arg(self.down_growth, n) @dataclass class CspModelCfg: stem: CspStemCfg stages: CspStagesCfg zero_init_last: bool = True # zero init last weight (usually bn) in residual path act_layer: str = 'leaky_relu' norm_layer: str = 'batchnorm' aa_layer: Optional[str] = None # FIXME support string factory for this def _cs3_cfg( width_multiplier=1.0, depth_multiplier=1.0, avg_down=False, act_layer='silu', focus=False, attn_layer=None, attn_kwargs=None, bottle_ratio=1.0, block_type='dark', ): if focus: stem_cfg = CspStemCfg( out_chs=make_divisible(64 * width_multiplier), kernel_size=6, stride=2, padding=2, pool='') else: stem_cfg = CspStemCfg( out_chs=tuple([make_divisible(c * width_multiplier) for c in (32, 64)]), kernel_size=3, stride=2, pool='') return CspModelCfg( stem=stem_cfg, stages=CspStagesCfg( out_chs=tuple([make_divisible(c * width_multiplier) for c in (128, 256, 512, 1024)]), depth=tuple([int(d * depth_multiplier) for d in (3, 6, 9, 3)]), stride=2, bottle_ratio=bottle_ratio, block_ratio=0.5, avg_down=avg_down, attn_layer=attn_layer, attn_kwargs=attn_kwargs, stage_type='cs3', block_type=block_type, ), act_layer=act_layer, ) class BottleneckBlock(nn.Module): """ ResNe(X)t Bottleneck Block """ def __init__( self, in_chs, out_chs, dilation=1, bottle_ratio=0.25, groups=1, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_last=False, attn_layer=None, drop_block=None, drop_path=0. 
): super(BottleneckBlock, self).__init__() mid_chs = int(round(out_chs * bottle_ratio)) ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) attn_last = attn_layer is not None and attn_last attn_first = attn_layer is not None and not attn_last self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs) self.conv2 = ConvNormAct( mid_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, drop_layer=drop_block, **ckwargs) self.attn2 = attn_layer(mid_chs, act_layer=act_layer) if attn_first else nn.Identity() self.conv3 = ConvNormAct(mid_chs, out_chs, kernel_size=1, apply_act=False, **ckwargs) self.attn3 = attn_layer(out_chs, act_layer=act_layer) if attn_last else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() self.act3 = create_act_layer(act_layer) def zero_init_last(self): nn.init.zeros_(self.conv3.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) x = self.attn2(x) x = self.conv3(x) x = self.attn3(x) x = self.drop_path(x) + shortcut # FIXME partial shortcut needed if first block handled as per original, not used for my current impl #x[:, :shortcut.size(1)] += shortcut x = self.act3(x) return x class DarkBlock(nn.Module): """ DarkNet Block """ def __init__( self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, drop_block=None, drop_path=0. ): super(DarkBlock, self).__init__() mid_chs = int(round(out_chs * bottle_ratio)) ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) self.conv1 = ConvNormAct(in_chs, mid_chs, kernel_size=1, **ckwargs) self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity() self.conv2 = ConvNormAct( mid_chs, out_chs, kernel_size=3, dilation=dilation, groups=groups, drop_layer=drop_block, **ckwargs) self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() def zero_init_last(self): nn.init.zeros_(self.conv2.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.attn(x) x = self.conv2(x) x = self.drop_path(x) + shortcut return x class EdgeBlock(nn.Module): """ EdgeResidual / Fused-MBConv / MobileNetV1-like 3x3 + 1x1 block (w/ activated output) """ def __init__( self, in_chs, out_chs, dilation=1, bottle_ratio=0.5, groups=1, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, drop_block=None, drop_path=0. 
): super(EdgeBlock, self).__init__() mid_chs = int(round(out_chs * bottle_ratio)) ckwargs = dict(act_layer=act_layer, norm_layer=norm_layer) self.conv1 = ConvNormAct( in_chs, mid_chs, kernel_size=3, dilation=dilation, groups=groups, drop_layer=drop_block, **ckwargs) self.attn = attn_layer(mid_chs, act_layer=act_layer) if attn_layer is not None else nn.Identity() self.conv2 = ConvNormAct(mid_chs, out_chs, kernel_size=1, **ckwargs) self.drop_path = DropPath(drop_path) if drop_path else nn.Identity() def zero_init_last(self): nn.init.zeros_(self.conv2.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.attn(x) x = self.conv2(x) x = self.drop_path(x) + shortcut return x class CrossStage(nn.Module): """Cross Stage.""" def __init__( self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., expand_ratio=1., groups=1, first_dilation=None, avg_down=False, down_growth=False, cross_linear=False, block_dpr=None, block_fn=BottleneckBlock, **block_kwargs, ): super(CrossStage, self).__init__() first_dilation = first_dilation or dilation down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels self.expand_chs = exp_chs = int(round(out_chs * expand_ratio)) block_out_chs = int(round(out_chs * block_ratio)) conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) aa_layer = block_kwargs.pop('aa_layer', None) if stride != 1 or first_dilation != dilation: if avg_down: self.conv_down = nn.Sequential( nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs) ) else: self.conv_down = ConvNormActAa( in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, aa_layer=aa_layer, **conv_kwargs) prev_chs = down_chs else: self.conv_down = nn.Identity() prev_chs = in_chs # FIXME this 1x1 expansion is pushed down into the cross and block paths in the darknet cfgs. Also, # there is also special case for the first stage for some of the model that results in uneven split # across the two paths. I did it this way for simplicity for now. self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) prev_chs = exp_chs // 2 # output of conv_exp is always split in two self.blocks = nn.Sequential() for i in range(depth): self.blocks.add_module(str(i), block_fn( in_chs=prev_chs, out_chs=block_out_chs, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, drop_path=block_dpr[i] if block_dpr is not None else 0., **block_kwargs, )) prev_chs = block_out_chs # transition convs self.conv_transition_b = ConvNormAct(prev_chs, exp_chs // 2, kernel_size=1, **conv_kwargs) self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) def forward(self, x): x = self.conv_down(x) x = self.conv_exp(x) xs, xb = x.split(self.expand_chs // 2, dim=1) xb = self.blocks(xb) xb = self.conv_transition_b(xb).contiguous() out = self.conv_transition(torch.cat([xs, xb], dim=1)) return out class CrossStage3(nn.Module): """Cross Stage 3. Similar to CrossStage, but with only one transition conv for the output. 
""" def __init__( self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., expand_ratio=1., groups=1, first_dilation=None, avg_down=False, down_growth=False, cross_linear=False, block_dpr=None, block_fn=BottleneckBlock, **block_kwargs, ): super(CrossStage3, self).__init__() first_dilation = first_dilation or dilation down_chs = out_chs if down_growth else in_chs # grow downsample channels to output channels self.expand_chs = exp_chs = int(round(out_chs * expand_ratio)) block_out_chs = int(round(out_chs * block_ratio)) conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) aa_layer = block_kwargs.pop('aa_layer', None) if stride != 1 or first_dilation != dilation: if avg_down: self.conv_down = nn.Sequential( nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs) ) else: self.conv_down = ConvNormActAa( in_chs, down_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, aa_layer=aa_layer, **conv_kwargs) prev_chs = down_chs else: self.conv_down = None prev_chs = in_chs # expansion conv self.conv_exp = ConvNormAct(prev_chs, exp_chs, kernel_size=1, apply_act=not cross_linear, **conv_kwargs) prev_chs = exp_chs // 2 # expanded output is split in 2 for blocks and cross stage self.blocks = nn.Sequential() for i in range(depth): self.blocks.add_module(str(i), block_fn( in_chs=prev_chs, out_chs=block_out_chs, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, drop_path=block_dpr[i] if block_dpr is not None else 0., **block_kwargs, )) prev_chs = block_out_chs # transition convs self.conv_transition = ConvNormAct(exp_chs, out_chs, kernel_size=1, **conv_kwargs) def forward(self, x): x = self.conv_down(x) x = self.conv_exp(x) x1, x2 = x.split(self.expand_chs // 2, dim=1) x1 = self.blocks(x1) out = self.conv_transition(torch.cat([x1, x2], dim=1)) return out class DarkStage(nn.Module): """DarkNet stage.""" def __init__( self, in_chs, out_chs, stride, dilation, depth, block_ratio=1., bottle_ratio=1., groups=1, first_dilation=None, avg_down=False, block_fn=BottleneckBlock, block_dpr=None, **block_kwargs, ): super(DarkStage, self).__init__() first_dilation = first_dilation or dilation conv_kwargs = dict(act_layer=block_kwargs.get('act_layer'), norm_layer=block_kwargs.get('norm_layer')) aa_layer = block_kwargs.pop('aa_layer', None) if avg_down: self.conv_down = nn.Sequential( nn.AvgPool2d(2) if stride == 2 else nn.Identity(), # FIXME dilation handling ConvNormActAa(in_chs, out_chs, kernel_size=1, stride=1, groups=groups, **conv_kwargs) ) else: self.conv_down = ConvNormActAa( in_chs, out_chs, kernel_size=3, stride=stride, dilation=first_dilation, groups=groups, aa_layer=aa_layer, **conv_kwargs) prev_chs = out_chs block_out_chs = int(round(out_chs * block_ratio)) self.blocks = nn.Sequential() for i in range(depth): self.blocks.add_module(str(i), block_fn( in_chs=prev_chs, out_chs=block_out_chs, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, drop_path=block_dpr[i] if block_dpr is not None else 0., **block_kwargs )) prev_chs = block_out_chs def forward(self, x): x = self.conv_down(x) x = self.blocks(x) return x def create_csp_stem( in_chans=3, out_chs=32, kernel_size=3, stride=2, pool='', padding='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, ): stem = nn.Sequential() feature_info = [] if not isinstance(out_chs, (tuple, list)): out_chs = [out_chs] stem_depth = len(out_chs) 
assert stem_depth assert stride in (1, 2, 4) prev_feat = None prev_chs = in_chans last_idx = stem_depth - 1 stem_stride = 1 for i, chs in enumerate(out_chs): conv_name = f'conv{i + 1}' conv_stride = 2 if (i == 0 and stride > 1) or (i == last_idx and stride > 2 and not pool) else 1 if conv_stride > 1 and prev_feat is not None: feature_info.append(prev_feat) stem.add_module(conv_name, ConvNormAct( prev_chs, chs, kernel_size, stride=conv_stride, padding=padding if i == 0 else '', act_layer=act_layer, norm_layer=norm_layer, )) stem_stride *= conv_stride prev_chs = chs prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', conv_name])) if pool: assert stride > 2 if prev_feat is not None: feature_info.append(prev_feat) if aa_layer is not None: stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=1, padding=1)) stem.add_module('aa', aa_layer(channels=prev_chs, stride=2)) pool_name = 'aa' else: stem.add_module('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) pool_name = 'pool' stem_stride *= 2 prev_feat = dict(num_chs=prev_chs, reduction=stem_stride, module='.'.join(['stem', pool_name])) feature_info.append(prev_feat) return stem, feature_info def _get_stage_fn(stage_args): stage_type = stage_args.pop('stage_type') assert stage_type in ('dark', 'csp', 'cs3') if stage_type == 'dark': stage_args.pop('expand_ratio', None) stage_args.pop('cross_linear', None) stage_args.pop('down_growth', None) stage_fn = DarkStage elif stage_type == 'csp': stage_fn = CrossStage else: stage_fn = CrossStage3 return stage_fn, stage_args def _get_block_fn(stage_args): block_type = stage_args.pop('block_type') assert block_type in ('dark', 'edge', 'bottle') if block_type == 'dark': return DarkBlock, stage_args elif block_type == 'edge': return EdgeBlock, stage_args else: return BottleneckBlock, stage_args def _get_attn_fn(stage_args): attn_layer = stage_args.pop('attn_layer') attn_kwargs = stage_args.pop('attn_kwargs', None) or {} if attn_layer is not None: attn_layer = get_attn(attn_layer) if attn_kwargs: attn_layer = partial(attn_layer, **attn_kwargs) return attn_layer, stage_args def create_csp_stages( cfg: CspModelCfg, drop_path_rate: float, output_stride: int, stem_feat: Dict[str, Any], ): cfg_dict = asdict(cfg.stages) num_stages = len(cfg.stages.depth) cfg_dict['block_dpr'] = [None] * num_stages if not drop_path_rate else \ [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.stages.depth)).split(cfg.stages.depth)] stage_args = [dict(zip(cfg_dict.keys(), values)) for values in zip(*cfg_dict.values())] block_kwargs = dict( act_layer=cfg.act_layer, norm_layer=cfg.norm_layer, ) dilation = 1 net_stride = stem_feat['reduction'] prev_chs = stem_feat['num_chs'] prev_feat = stem_feat feature_info = [] stages = [] for stage_idx, stage_args in enumerate(stage_args): stage_fn, stage_args = _get_stage_fn(stage_args) block_fn, stage_args = _get_block_fn(stage_args) attn_fn, stage_args = _get_attn_fn(stage_args) stride = stage_args.pop('stride') if stride != 1 and prev_feat: feature_info.append(prev_feat) if net_stride >= output_stride and stride > 1: dilation *= stride stride = 1 net_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 stages += [stage_fn( prev_chs, **stage_args, stride=stride, first_dilation=first_dilation, dilation=dilation, block_fn=block_fn, aa_layer=cfg.aa_layer, attn_layer=attn_fn, # will be passed through stage as block_kwargs **block_kwargs, )] prev_chs = stage_args['out_chs'] prev_feat = dict(num_chs=prev_chs, reduction=net_stride, 
module=f'stages.{stage_idx}') feature_info.append(prev_feat) return nn.Sequential(*stages), feature_info class CspNet(nn.Module): """Cross Stage Partial base model. Paper: `CSPNet: A New Backbone that can Enhance Learning Capability of CNN` - https://arxiv.org/abs/1911.11929 Ref Impl: https://github.com/WongKinYiu/CrossStagePartialNetworks NOTE: There are differences in the way I handle the 1x1 'expansion' conv in this impl vs the darknet impl. I did it this way for simplicity and less special cases. """ def __init__( self, cfg: CspModelCfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., drop_path_rate=0., zero_init_last=True, **kwargs, ): """ Args: cfg (CspModelCfg): Model architecture configuration in_chans (int): Number of input channels (default: 3) num_classes (int): Number of classifier classes (default: 1000) output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32) global_pool (str): Global pooling type (default: 'avg') drop_rate (float): Dropout rate (default: 0.) drop_path_rate (float): Stochastic depth drop-path rate (default: 0.) zero_init_last (bool): Zero-init last weight of residual path kwargs (dict): Extra kwargs overlayed onto cfg """ super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate assert output_stride in (8, 16, 32) cfg = replace(cfg, **kwargs) # overlay kwargs onto cfg layer_args = dict( act_layer=cfg.act_layer, norm_layer=cfg.norm_layer, aa_layer=cfg.aa_layer ) self.feature_info = [] # Construct the stem self.stem, stem_feat_info = create_csp_stem(in_chans, **asdict(cfg.stem), **layer_args) self.feature_info.extend(stem_feat_info[:-1]) # Construct the stages self.stages, stage_feat_info = create_csp_stages( cfg, drop_path_rate=drop_path_rate, output_stride=output_stride, stem_feat=stem_feat_info[-1], ) prev_chs = stage_feat_info[-1]['num_chs'] self.feature_info.extend(stage_feat_info) # Construct the head self.num_features = prev_chs self.head = ClassifierHead( in_features=prev_chs, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate) named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^stages\.(\d+)\..*transition', MATCH_PREV_GROUP), # map to last block in stage (r'^stages\.(\d+)', (0,)), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) def forward_features(self, x): x = self.stem(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name, zero_init_last=False): if isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, mean=0.0, std=0.01) if module.bias is not None: nn.init.zeros_(module.bias) elif zero_init_last and hasattr(module, 'zero_init_last'): module.zero_init_last() model_cfgs = dict( 
cspresnet50=CspModelCfg( stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'), stages=CspStagesCfg( depth=(3, 3, 5, 2), out_chs=(128, 256, 512, 1024), stride=(1, 2), expand_ratio=2., bottle_ratio=0.5, cross_linear=True, ), ), cspresnet50d=CspModelCfg( stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'), stages=CspStagesCfg( depth=(3, 3, 5, 2), out_chs=(128, 256, 512, 1024), stride=(1,) + (2,), expand_ratio=2., bottle_ratio=0.5, block_ratio=1., cross_linear=True, ), ), cspresnet50w=CspModelCfg( stem=CspStemCfg(out_chs=(32, 32, 64), kernel_size=3, stride=4, pool='max'), stages=CspStagesCfg( depth=(3, 3, 5, 2), out_chs=(256, 512, 1024, 2048), stride=(1,) + (2,), expand_ratio=1., bottle_ratio=0.25, block_ratio=0.5, cross_linear=True, ), ), cspresnext50=CspModelCfg( stem=CspStemCfg(out_chs=64, kernel_size=7, stride=4, pool='max'), stages=CspStagesCfg( depth=(3, 3, 5, 2), out_chs=(256, 512, 1024, 2048), stride=(1,) + (2,), groups=32, expand_ratio=1., bottle_ratio=1., block_ratio=0.5, cross_linear=True, ), ), cspdarknet53=CspModelCfg( stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg( depth=(1, 2, 8, 8, 4), out_chs=(64, 128, 256, 512, 1024), stride=2, expand_ratio=(2.,) + (1.,), bottle_ratio=(0.5,) + (1.,), block_ratio=(1.,) + (0.5,), down_growth=True, block_type='dark', ), ), darknet17=CspModelCfg( stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg( depth=(1,) * 5, out_chs=(64, 128, 256, 512, 1024), stride=(2,), bottle_ratio=(0.5,), block_ratio=(1.,), stage_type='dark', block_type='dark', ), ), darknet21=CspModelCfg( stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg( depth=(1, 1, 1, 2, 2), out_chs=(64, 128, 256, 512, 1024), stride=(2,), bottle_ratio=(0.5,), block_ratio=(1.,), stage_type='dark', block_type='dark', ), ), sedarknet21=CspModelCfg( stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg( depth=(1, 1, 1, 2, 2), out_chs=(64, 128, 256, 512, 1024), stride=2, bottle_ratio=0.5, block_ratio=1., attn_layer='se', stage_type='dark', block_type='dark', ), ), darknet53=CspModelCfg( stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg( depth=(1, 2, 8, 8, 4), out_chs=(64, 128, 256, 512, 1024), stride=2, bottle_ratio=0.5, block_ratio=1., stage_type='dark', block_type='dark', ), ), darknetaa53=CspModelCfg( stem=CspStemCfg(out_chs=32, kernel_size=3, stride=1, pool=''), stages=CspStagesCfg( depth=(1, 2, 8, 8, 4), out_chs=(64, 128, 256, 512, 1024), stride=2, bottle_ratio=0.5, block_ratio=1., avg_down=True, stage_type='dark', block_type='dark', ), ), cs3darknet_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5), cs3darknet_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67), cs3darknet_l=_cs3_cfg(), cs3darknet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33), cs3darknet_focus_s=_cs3_cfg(width_multiplier=0.5, depth_multiplier=0.5, focus=True), cs3darknet_focus_m=_cs3_cfg(width_multiplier=0.75, depth_multiplier=0.67, focus=True), cs3darknet_focus_l=_cs3_cfg(focus=True), cs3darknet_focus_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, focus=True), cs3sedarknet_l=_cs3_cfg(attn_layer='se', attn_kwargs=dict(rd_ratio=.25)), cs3sedarknet_x=_cs3_cfg(attn_layer='se', width_multiplier=1.25, depth_multiplier=1.33), cs3sedarknet_xdw=CspModelCfg( stem=CspStemCfg(out_chs=(32, 64), kernel_size=3, stride=2, pool=''), stages=CspStagesCfg( depth=(3, 6, 12, 4), out_chs=(256, 512, 1024, 2048), stride=2, 
groups=(1, 1, 256, 512), bottle_ratio=0.5, block_ratio=0.5, attn_layer='se', ), act_layer='silu', ), cs3edgenet_x=_cs3_cfg(width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge'), cs3se_edgenet_x=_cs3_cfg( width_multiplier=1.25, depth_multiplier=1.33, bottle_ratio=1.5, block_type='edge', attn_layer='se', attn_kwargs=dict(rd_ratio=.25)), ) def _create_cspnet(variant, pretrained=False, **kwargs): if variant.startswith('darknet') or variant.startswith('cspdarknet'): # NOTE: DarkNet is one of few models with stride==1 features w/ 6 out_indices [0..5] default_out_indices = (0, 1, 2, 3, 4, 5) else: default_out_indices = (0, 1, 2, 3, 4) out_indices = kwargs.pop('out_indices', default_out_indices) return build_model_with_cfg( CspNet, variant, pretrained, model_cfg=model_cfgs[variant], feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.887, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'cspresnet50.ra_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth'), 'cspresnet50d.untrained': _cfg(), 'cspresnet50w.untrained': _cfg(), 'cspresnext50.ra_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth', ), 'cspdarknet53.ra_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth'), 'darknet17.untrained': _cfg(), 'darknet21.untrained': _cfg(), 'sedarknet21.untrained': _cfg(), 'darknet53.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknet53_256_c2ns-3aeff817.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'darknetaa53.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/darknetaa53_c2ns-5c28ec8a.pth', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'cs3darknet_s.untrained': _cfg(interpolation='bicubic'), 'cs3darknet_m.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_m_c2ns-43f06604.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95, ), 'cs3darknet_l.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_l_c2ns-16220c5d.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'cs3darknet_x.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_x_c2ns-4e4490aa.pth', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'cs3darknet_focus_s.untrained': _cfg(interpolation='bicubic'), 'cs3darknet_focus_m.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_m_c2ns-e23bed41.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), 
'cs3darknet_focus_l.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3darknet_focus_l_c2ns-65ef8888.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'cs3darknet_focus_x.untrained': _cfg(interpolation='bicubic'), 'cs3sedarknet_l.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_l_c2ns-e8d1dc13.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'cs3sedarknet_x.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3sedarknet_x_c2ns-b4d0abc0.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'cs3sedarknet_xdw.untrained': _cfg(interpolation='bicubic'), 'cs3edgenet_x.c2_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3edgenet_x_c2-2e1610a9.pth', interpolation='bicubic', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'cs3se_edgenet_x.c2ns_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/cs3se_edgenet_x_c2ns-76f8e3ac.pth', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0), }) @register_model def cspresnet50(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspresnet50', pretrained=pretrained, **kwargs) @register_model def cspresnet50d(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspresnet50d', pretrained=pretrained, **kwargs) @register_model def cspresnet50w(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspresnet50w', pretrained=pretrained, **kwargs) @register_model def cspresnext50(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspresnext50', pretrained=pretrained, **kwargs) @register_model def cspdarknet53(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cspdarknet53', pretrained=pretrained, **kwargs) @register_model def darknet17(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('darknet17', pretrained=pretrained, **kwargs) @register_model def darknet21(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('darknet21', pretrained=pretrained, **kwargs) @register_model def sedarknet21(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('sedarknet21', pretrained=pretrained, **kwargs) @register_model def darknet53(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('darknet53', pretrained=pretrained, **kwargs) @register_model def darknetaa53(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('darknetaa53', pretrained=pretrained, **kwargs) @register_model def cs3darknet_s(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_s', pretrained=pretrained, **kwargs) @register_model def cs3darknet_m(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_m', pretrained=pretrained, **kwargs) @register_model def cs3darknet_l(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_l', pretrained=pretrained, **kwargs) @register_model def cs3darknet_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_x', pretrained=pretrained, **kwargs) @register_model def cs3darknet_focus_s(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_focus_s', pretrained=pretrained, **kwargs) 
@register_model def cs3darknet_focus_m(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_focus_m', pretrained=pretrained, **kwargs) @register_model def cs3darknet_focus_l(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_focus_l', pretrained=pretrained, **kwargs) @register_model def cs3darknet_focus_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3darknet_focus_x', pretrained=pretrained, **kwargs) @register_model def cs3sedarknet_l(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3sedarknet_l', pretrained=pretrained, **kwargs) @register_model def cs3sedarknet_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3sedarknet_x', pretrained=pretrained, **kwargs) @register_model def cs3sedarknet_xdw(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3sedarknet_xdw', pretrained=pretrained, **kwargs) @register_model def cs3edgenet_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3edgenet_x', pretrained=pretrained, **kwargs) @register_model def cs3se_edgenet_x(pretrained=False, **kwargs) -> CspNet: return _create_cspnet('cs3se_edgenet_x', pretrained=pretrained, **kwargs)
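# Minimal usage sketch for the CspNet / DarkNet entrypoints registered above,
# assuming `timm` and `torch` are available. features_only exercises the
# feature_info / out_indices wiring set up in _create_cspnet; pretrained=False
# avoids any weight download.
import torch
import timm

backbone = timm.create_model('cspdarknet53', pretrained=False, features_only=True)
backbone.eval()

with torch.no_grad():
    feature_maps = backbone(torch.randn(1, 3, 256, 256))   # default cfg input size

for reduction, channels, fmap in zip(
        backbone.feature_info.reduction(),
        backbone.feature_info.channels(),
        feature_maps):
    print(f'stride {reduction:2d}: {channels:4d} channels, {tuple(fmap.shape)}')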
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/davit.py
""" DaViT: Dual Attention Vision Transformers As described in https://arxiv.org/abs/2204.03645 Input size invariant transformer architecture that combines channel and spacial attention in each block. The attention mechanisms used are linear in complexity. DaViT model defs and weights adapted from https://github.com/dingmyu/davit, original copyright below """ # Copyright (c) 2022 Mingyu Ding # All rights reserved. # This source code is licensed under the MIT license from functools import partial from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, to_2tuple, trunc_normal_, Mlp, LayerNorm2d, get_norm_layer, use_fused_attn from timm.layers import NormMlpClassifierHead, ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['DaVit'] class ConvPosEnc(nn.Module): def __init__(self, dim: int, k: int = 3, act: bool = False): super(ConvPosEnc, self).__init__() self.proj = nn.Conv2d(dim, dim, k, 1, k // 2, groups=dim) self.act = nn.GELU() if act else nn.Identity() def forward(self, x: Tensor): feat = self.proj(x) x = x + self.act(feat) return x class Stem(nn.Module): """ Size-agnostic implementation of 2D image to patch embedding, allowing input size to be adjusted during model forward operation """ def __init__( self, in_chs=3, out_chs=96, stride=4, norm_layer=LayerNorm2d, ): super().__init__() stride = to_2tuple(stride) self.stride = stride self.in_chs = in_chs self.out_chs = out_chs assert stride[0] == 4 # only setup for stride==4 self.conv = nn.Conv2d( in_chs, out_chs, kernel_size=7, stride=stride, padding=3, ) self.norm = norm_layer(out_chs) def forward(self, x: Tensor): B, C, H, W = x.shape x = F.pad(x, (0, (self.stride[1] - W % self.stride[1]) % self.stride[1])) x = F.pad(x, (0, 0, 0, (self.stride[0] - H % self.stride[0]) % self.stride[0])) x = self.conv(x) x = self.norm(x) return x class Downsample(nn.Module): def __init__( self, in_chs, out_chs, norm_layer=LayerNorm2d, ): super().__init__() self.in_chs = in_chs self.out_chs = out_chs self.norm = norm_layer(in_chs) self.conv = nn.Conv2d( in_chs, out_chs, kernel_size=2, stride=2, padding=0, ) def forward(self, x: Tensor): B, C, H, W = x.shape x = self.norm(x) x = F.pad(x, (0, (2 - W % 2) % 2)) x = F.pad(x, (0, 0, 0, (2 - H % 2) % 2)) x = self.conv(x) return x class ChannelAttention(nn.Module): def __init__(self, dim, num_heads=8, qkv_bias=False): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.proj = nn.Linear(dim, dim) def forward(self, x: Tensor): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) k = k * self.scale attention = k.transpose(-1, -2) @ v attention = attention.softmax(dim=-1) x = (attention @ q.transpose(-1, -2)).transpose(-1, -2) x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) return x class ChannelBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ffn=True, cpe_act=False, ): super().__init__() self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act) self.ffn = ffn self.norm1 = norm_layer(dim) self.attn = 
ChannelAttention(dim, num_heads=num_heads, qkv_bias=qkv_bias) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act) if self.ffn: self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() else: self.norm2 = None self.mlp = None self.drop_path2 = None def forward(self, x: Tensor): B, C, H, W = x.shape x = self.cpe1(x).flatten(2).transpose(1, 2) cur = self.norm1(x) cur = self.attn(cur) x = x + self.drop_path1(cur) x = self.cpe2(x.transpose(1, 2).view(B, C, H, W)) if self.mlp is not None: x = x.flatten(2).transpose(1, 2) x = x + self.drop_path2(self.mlp(self.norm2(x))) x = x.transpose(1, 2).view(B, C, H, W) return x def window_partition(x: Tensor, window_size: Tuple[int, int]): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows: Tensor, window_size: Tuple[int, int], H: int, W: int): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class WindowAttention(nn.Module): r""" Window based multi-head self attention (W-MSA) module with relative position bias. It supports both of shifted and non-shifted window. Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, window_size, num_heads, qkv_bias=True): super().__init__() self.dim = dim self.window_size = window_size self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.proj = nn.Linear(dim, dim) self.softmax = nn.Softmax(dim=-1) def forward(self, x: Tensor): B_, N, C = x.shape qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v) else: q = q * self.scale attn = (q @ k.transpose(-2, -1)) attn = self.softmax(attn) x = attn @ v x = x.transpose(1, 2).reshape(B_, N, C) x = self.proj(x) return x class SpatialBlock(nn.Module): r""" Windows Block. Args: dim (int): Number of input channels. num_heads (int): Number of attention heads. window_size (int): Window size. mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True drop_path (float, optional): Stochastic depth rate. Default: 0.0 act_layer (nn.Module, optional): Activation layer. Default: nn.GELU norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm """ def __init__( self, dim, num_heads, window_size=7, mlp_ratio=4., qkv_bias=True, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ffn=True, cpe_act=False, ): super().__init__() self.dim = dim self.ffn = ffn self.num_heads = num_heads self.window_size = to_2tuple(window_size) self.mlp_ratio = mlp_ratio self.cpe1 = ConvPosEnc(dim=dim, k=3, act=cpe_act) self.norm1 = norm_layer(dim) self.attn = WindowAttention( dim, self.window_size, num_heads=num_heads, qkv_bias=qkv_bias, ) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.cpe2 = ConvPosEnc(dim=dim, k=3, act=cpe_act) if self.ffn: self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() else: self.norm2 = None self.mlp = None self.drop_path1 = None def forward(self, x: Tensor): B, C, H, W = x.shape shortcut = self.cpe1(x).flatten(2).transpose(1, 2) x = self.norm1(shortcut) x = x.view(B, H, W, C) pad_l = pad_t = 0 pad_r = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] pad_b = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) _, Hp, Wp, _ = x.shape x_windows = window_partition(x, self.window_size) x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C) # W-MSA/SW-MSA attn_windows = self.attn(x_windows) # merge windows attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) x = window_reverse(attn_windows, self.window_size, Hp, Wp) # if pad_r > 0 or pad_b > 0: x = x[:, :H, :W, :].contiguous() x = x.view(B, H * W, C) x = shortcut + self.drop_path1(x) x = self.cpe2(x.transpose(1, 2).view(B, C, H, W)) if self.mlp is not None: x = x.flatten(2).transpose(1, 2) x = x + self.drop_path2(self.mlp(self.norm2(x))) x = x.transpose(1, 2).view(B, C, H, W) return x class DaVitStage(nn.Module): def __init__( self, in_chs, out_chs, depth=1, downsample=True, attn_types=('spatial', 'channel'), num_heads=3, window_size=7, mlp_ratio=4, qkv_bias=True, drop_path_rates=(0, 0), norm_layer=LayerNorm2d, norm_layer_cl=nn.LayerNorm, ffn=True, cpe_act=False ): super().__init__() self.grad_checkpointing = False # downsample embedding layer at the beginning of each stage if downsample: self.downsample = Downsample(in_chs, out_chs, norm_layer=norm_layer) else: self.downsample = nn.Identity() ''' repeating alternating attention blocks in each stage default: (spatial -> channel) x depth potential opportunity to integrate with a more general version of ByobNet/ByoaNet since the logic is similar ''' stage_blocks = [] for block_idx in range(depth): dual_attention_block = [] for attn_idx, attn_type in enumerate(attn_types): if attn_type == 'spatial': dual_attention_block.append(SpatialBlock( dim=out_chs, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_path=drop_path_rates[block_idx], norm_layer=norm_layer_cl, ffn=ffn, cpe_act=cpe_act, window_size=window_size, )) elif attn_type == 'channel': dual_attention_block.append(ChannelBlock( dim=out_chs, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_path=drop_path_rates[block_idx], norm_layer=norm_layer_cl, ffn=ffn, cpe_act=cpe_act )) stage_blocks.append(nn.Sequential(*dual_attention_block)) self.blocks = nn.Sequential(*stage_blocks) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def 
forward(self, x: Tensor): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class DaVit(nn.Module): r""" DaViT A PyTorch implementation of `DaViT: Dual Attention Vision Transformers` - https://arxiv.org/abs/2204.03645 Supports arbitrary input sizes and pyramid feature extraction Args: in_chans (int): Number of input image channels. Default: 3 num_classes (int): Number of classes for classification head. Default: 1000 depths (tuple(int)): Number of blocks in each stage. Default: (1, 1, 3, 1) embed_dims (tuple(int)): Patch embedding dimension. Default: (96, 192, 384, 768) num_heads (tuple(int)): Number of attention heads in different layers. Default: (3, 6, 12, 24) window_size (int): Window size. Default: 7 mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4 qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True drop_path_rate (float): Stochastic depth rate. Default: 0.1 norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. """ def __init__( self, in_chans=3, depths=(1, 1, 3, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24), window_size=7, mlp_ratio=4, qkv_bias=True, norm_layer='layernorm2d', norm_layer_cl='layernorm', norm_eps=1e-5, attn_types=('spatial', 'channel'), ffn=True, cpe_act=False, drop_rate=0., drop_path_rate=0., num_classes=1000, global_pool='avg', head_norm_first=False, ): super().__init__() num_stages = len(embed_dims) assert num_stages == len(num_heads) == len(depths) norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps) self.num_classes = num_classes self.num_features = embed_dims[-1] self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] self.stem = Stem(in_chans, embed_dims[0], norm_layer=norm_layer) in_chs = embed_dims[0] dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] for stage_idx in range(num_stages): out_chs = embed_dims[stage_idx] stage = DaVitStage( in_chs, out_chs, depth=depths[stage_idx], downsample=stage_idx > 0, attn_types=attn_types, num_heads=num_heads[stage_idx], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_path_rates=dpr[stage_idx], norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, ffn=ffn, cpe_act=cpe_act, ) in_chs = out_chs stages.append(stage) self.feature_info += [dict(num_chs=out_chs, reduction=2, module=f'stages.{stage_idx}')] self.stages = nn.Sequential(*stages) # if head_norm_first == true, norm -> global pool -> fc ordering, like most other nets # otherwise pool -> norm -> fc, the default DaViT order, similar to ConvNeXt # FIXME generalize this structure to ClassifierHead if head_norm_first: self.norm_pre = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) else: self.norm_pre = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, norm_layer=norm_layer, ) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable for stage in self.stages: stage.set_grad_checkpointing(enable=enable) 
@torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.head.reset(num_classes, global_pool=global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.head.global_pool(x) x = self.head.norm(x) x = self.head.flatten(x) x = self.head.drop(x) return x if pre_logits else self.head.fc(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ Remap MSFT checkpoints -> timm """ if 'head.fc.weight' in state_dict: return state_dict # non-MSFT checkpoint if 'state_dict' in state_dict: state_dict = state_dict['state_dict'] import re out_dict = {} for k, v in state_dict.items(): k = re.sub(r'patch_embeds.([0-9]+)', r'stages.\1.downsample', k) k = re.sub(r'main_blocks.([0-9]+)', r'stages.\1.blocks', k) k = k.replace('downsample.proj', 'downsample.conv') k = k.replace('stages.0.downsample', 'stem') k = k.replace('head.', 'head.fc.') k = k.replace('norms.', 'head.norm.') k = k.replace('cpe.0', 'cpe1') k = k.replace('cpe.1', 'cpe2') out_dict[k] = v return out_dict def _create_davit(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( DaVit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs } # TODO contact authors to get larger pretrained models default_cfgs = generate_default_cfgs({ # official microsoft weights from https://github.com/dingmyu/davit 'davit_tiny.msft_in1k': _cfg( hf_hub_id='timm/'), 'davit_small.msft_in1k': _cfg( hf_hub_id='timm/'), 'davit_base.msft_in1k': _cfg( hf_hub_id='timm/'), 'davit_large': _cfg(), 'davit_huge': _cfg(), 'davit_giant': _cfg(), }) @register_model def davit_tiny(pretrained=False, **kwargs) -> DaVit: model_kwargs = dict( depths=(1, 1, 3, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24), **kwargs) return _create_davit('davit_tiny', pretrained=pretrained, **model_kwargs) @register_model def davit_small(pretrained=False, **kwargs) -> DaVit: model_kwargs = dict( depths=(1, 1, 9, 1), embed_dims=(96, 192, 384, 768), num_heads=(3, 6, 12, 24), **kwargs) return _create_davit('davit_small', pretrained=pretrained, **model_kwargs) @register_model def davit_base(pretrained=False, **kwargs) -> DaVit: model_kwargs = dict( depths=(1, 1, 9, 1), embed_dims=(128, 256, 512, 1024), num_heads=(4, 8, 16, 32), **kwargs) return _create_davit('davit_base', pretrained=pretrained, **model_kwargs) @register_model def davit_large(pretrained=False, **kwargs) -> DaVit: model_kwargs = dict( depths=(1, 1, 9, 1), embed_dims=(192, 384, 768, 1536), num_heads=(6, 12, 24, 48), **kwargs) return _create_davit('davit_large', pretrained=pretrained, **model_kwargs) @register_model def davit_huge(pretrained=False, **kwargs) -> DaVit: model_kwargs = dict( depths=(1, 1, 9, 1), embed_dims=(256, 512, 
1024, 2048), num_heads=(8, 16, 32, 64), **kwargs)
    return _create_davit('davit_huge', pretrained=pretrained, **model_kwargs)


@register_model
def davit_giant(pretrained=False, **kwargs) -> DaVit:
    model_kwargs = dict(
        depths=(1, 1, 12, 3), embed_dims=(384, 768, 1536, 3072), num_heads=(12, 24, 48, 96), **kwargs)
    return _create_davit('davit_giant', pretrained=pretrained, **model_kwargs)
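# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch (not part of the original file), showing
# how the DaViT entrypoints registered above are typically built through the
# timm factory. It assumes `timm` with this module is importable; weights are
# random (pretrained=False), so no download is needed.
import torch
import timm

davit = timm.create_model('davit_tiny', pretrained=False, num_classes=10)
davit.eval()
img = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = davit(img)            # (1, 10) classification logits
print(logits.shape)

# The same entrypoint exposes the stage outputs as a feature pyramid.
davit_feats = timm.create_model('davit_tiny', pretrained=False, features_only=True)
with torch.no_grad():
    feature_maps = davit_feats(img)
print([tuple(f.shape) for f in feature_maps])
# ---------------------------------------------------------------------------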
hf_public_repos/pytorch-image-models/timm/models/deit.py
""" DeiT - Data-efficient Image Transformers DeiT model defs and weights from https://github.com/facebookresearch/deit, original copyright below paper: `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 paper: `DeiT III: Revenge of the ViT` - https://arxiv.org/abs/2204.07118 Modifications copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. from functools import partial from typing import Sequence, Union import torch from torch import nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import resample_abs_pos_embed from timm.models.vision_transformer import VisionTransformer, trunc_normal_, checkpoint_filter_fn from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['VisionTransformerDistilled'] # model_registry will add each entrypoint fn to this class VisionTransformerDistilled(VisionTransformer): """ Vision Transformer w/ Distillation Token and Head Distillation token & head support for `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877 """ def __init__(self, *args, **kwargs): weight_init = kwargs.pop('weight_init', '') super().__init__(*args, **kwargs, weight_init='skip') assert self.global_pool in ('token',) self.num_prefix_tokens = 2 self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.pos_embed = nn.Parameter( torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim)) self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity() self.distilled_training = False # must set this True to train w/ distillation token self.init_weights(weight_init) def init_weights(self, mode=''): trunc_normal_(self.dist_token, std=.02) super().init_weights(mode=mode) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed|dist_token', blocks=[ (r'^blocks\.(\d+)', None), (r'^norm', (99999,))] # final norm w/ last block ) @torch.jit.ignore def get_classifier(self): return self.head, self.head_dist def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def _intermediate_layers( self, x: torch.Tensor, n: Union[int, Sequence] = 1, ): outputs, num_blocks = [], len(self.blocks) take_indices = set(range(num_blocks - n, num_blocks) if isinstance(n, int) else n) # forward pass x = self.patch_embed(x) x = torch.cat(( self.cls_token.expand(x.shape[0], -1, -1), self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) x = self.pos_drop(x + self.pos_embed) x = self.patch_drop(x) x = self.norm_pre(x) for i, blk in enumerate(self.blocks): x = blk(x) if i in take_indices: outputs.append(x) return outputs def forward_features(self, x) -> torch.Tensor: x = self.patch_embed(x) x = torch.cat(( self.cls_token.expand(x.shape[0], -1, -1), self.dist_token.expand(x.shape[0], -1, -1), x), dim=1) x = self.pos_drop(x + self.pos_embed) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: 
bool = False) -> torch.Tensor: x, x_dist = x[:, 0], x[:, 1] if pre_logits: return (x + x_dist) / 2 x = self.head(x) x_dist = self.head_dist(x_dist) if self.distilled_training and self.training and not torch.jit.is_scripting(): # only return separate classification predictions when training in distilled mode return x, x_dist else: # during standard train / finetune, inference average the classifier predictions return (x + x_dist) / 2 def _create_deit(variant, pretrained=False, distilled=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model_cls = VisionTransformerDistilled if distilled else VisionTransformer model = build_model_with_cfg( model_cls, variant, pretrained, pretrained_filter_fn=partial(checkpoint_filter_fn, adapt_layer_scale=True), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # deit models (FB weights) 'deit_tiny_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'), 'deit_small_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'), 'deit_base_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'), 'deit_base_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit_tiny_distilled_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth', classifier=('head', 'head_dist')), 'deit_small_distilled_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth', classifier=('head', 'head_dist')), 'deit_base_distilled_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth', classifier=('head', 'head_dist')), 'deit_base_distilled_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth', input_size=(3, 384, 384), crop_pct=1.0, classifier=('head', 'head_dist')), 'deit3_small_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_1k.pth'), 'deit3_small_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_1k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_medium_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_1k.pth'), 'deit3_base_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_1k.pth'), 'deit3_base_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_1k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_large_patch16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_1k.pth'), 'deit3_large_patch16_384.fb_in1k': _cfg( hf_hub_id='timm/', 
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_1k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_huge_patch14_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth'), 'deit3_small_patch16_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_21k.pth', crop_pct=1.0), 'deit3_small_patch16_384.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_21k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_medium_patch16_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_21k.pth', crop_pct=1.0), 'deit3_base_patch16_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth', crop_pct=1.0), 'deit3_base_patch16_384.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_21k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_large_patch16_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_21k.pth', crop_pct=1.0), 'deit3_large_patch16_384.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_21k.pth', input_size=(3, 384, 384), crop_pct=1.0), 'deit3_huge_patch14_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_21k_v1.pth', crop_pct=1.0), }) @register_model def deit_tiny_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) model = _create_deit('deit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_deit('deit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit('deit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit('deit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. 
""" model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) model = _create_deit( 'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit_small_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_deit( 'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit_base_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled: """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit( 'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit_base_distilled_patch16_384(pretrained=False, **kwargs) -> VisionTransformerDistilled: """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_deit( 'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs)) return model @register_model def deit3_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 small model @ 224x224 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_small_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 small model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 medium model @ 224x224 (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 base model @ 224x224 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. 
""" model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_large_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 large model @ 224x224 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_large_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 large model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def deit3_huge_patch14_224(pretrained=False, **kwargs) -> VisionTransformer: """ DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, no_embed_class=True, init_values=1e-6) model = _create_deit('deit3_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'deit3_small_patch16_224_in21ft1k': 'deit3_small_patch16_224.fb_in22k_ft_in1k', 'deit3_small_patch16_384_in21ft1k': 'deit3_small_patch16_384.fb_in22k_ft_in1k', 'deit3_medium_patch16_224_in21ft1k': 'deit3_medium_patch16_224.fb_in22k_ft_in1k', 'deit3_base_patch16_224_in21ft1k': 'deit3_base_patch16_224.fb_in22k_ft_in1k', 'deit3_base_patch16_384_in21ft1k': 'deit3_base_patch16_384.fb_in22k_ft_in1k', 'deit3_large_patch16_224_in21ft1k': 'deit3_large_patch16_224.fb_in22k_ft_in1k', 'deit3_large_patch16_384_in21ft1k': 'deit3_large_patch16_384.fb_in22k_ft_in1k', 'deit3_huge_patch14_224_in21ft1k': 'deit3_huge_patch14_224.fb_in22k_ft_in1k' })
hf_public_repos/pytorch-image-models/timm/models/densenet.py
"""Pytorch Densenet implementation w/ tweaks This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with fixed kwargs passthrough and addition of dynamic global avg/max pool. """ import re from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from torch.jit.annotations import List from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import BatchNormAct2d, get_norm_act_layer, BlurPool2d, create_classifier from ._builder import build_model_with_cfg from ._manipulate import MATCH_PREV_GROUP from ._registry import register_model, generate_default_cfgs __all__ = ['DenseNet'] class DenseLayer(nn.Module): def __init__( self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, drop_rate=0., grad_checkpointing=False, ): super(DenseLayer, self).__init__() self.add_module('norm1', norm_layer(num_input_features)), self.add_module('conv1', nn.Conv2d( num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), self.add_module('norm2', norm_layer(bn_size * growth_rate)), self.add_module('conv2', nn.Conv2d( bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), self.drop_rate = float(drop_rate) self.grad_checkpointing = grad_checkpointing def bottleneck_fn(self, xs): # type: (List[torch.Tensor]) -> torch.Tensor concated_features = torch.cat(xs, 1) bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484 return bottleneck_output # todo: rewrite when torchscript supports any def any_requires_grad(self, x): # type: (List[torch.Tensor]) -> bool for tensor in x: if tensor.requires_grad: return True return False @torch.jit.unused # noqa: T484 def call_checkpoint_bottleneck(self, x): # type: (List[torch.Tensor]) -> torch.Tensor def closure(*xs): return self.bottleneck_fn(xs) return cp.checkpoint(closure, *x) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (torch.Tensor) pass # torchscript does not yet support *args, so we overload method # allowing it to take either a List[Tensor] or single Tensor def forward(self, x): # noqa: F811 if isinstance(x, torch.Tensor): prev_features = [x] else: prev_features = x if self.grad_checkpointing and self.any_requires_grad(prev_features): if torch.jit.is_scripting(): raise Exception("Memory Efficient not supported in JIT") bottleneck_output = self.call_checkpoint_bottleneck(prev_features) else: bottleneck_output = self.bottleneck_fn(prev_features) new_features = self.conv2(self.norm2(bottleneck_output)) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class DenseBlock(nn.ModuleDict): _version = 2 def __init__( self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=BatchNormAct2d, drop_rate=0., grad_checkpointing=False, ): super(DenseBlock, self).__init__() for i in range(num_layers): layer = DenseLayer( num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, norm_layer=norm_layer, drop_rate=drop_rate, grad_checkpointing=grad_checkpointing, ) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features): features = [init_features] for name, layer in self.items(): new_features = layer(features) features.append(new_features) return torch.cat(features, 1) class 
DenseTransition(nn.Sequential): def __init__( self, num_input_features, num_output_features, norm_layer=BatchNormAct2d, aa_layer=None, ): super(DenseTransition, self).__init__() self.add_module('norm', norm_layer(num_input_features)) self.add_module('conv', nn.Conv2d( num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) if aa_layer is not None: self.add_module('pool', aa_layer(num_output_features, stride=2)) else: self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) class DenseNet(nn.Module): r"""Densenet-BC model class, based on `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: growth_rate (int) - how many filters to add each layer (`k` in paper) block_config (list of 4 ints) - how many layers in each pooling block bn_size (int) - multiplicative factor for number of bottle neck layers (i.e. bn_size * k features in the bottleneck layer) drop_rate (float) - dropout rate before classifier layer proj_drop_rate (float) - dropout rate after each dense layer num_classes (int) - number of classification classes memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ """ def __init__( self, growth_rate=32, block_config=(6, 12, 24, 16), num_classes=1000, in_chans=3, global_pool='avg', bn_size=4, stem_type='', act_layer='relu', norm_layer='batchnorm2d', aa_layer=None, drop_rate=0., proj_drop_rate=0., memory_efficient=False, aa_stem_only=True, ): self.num_classes = num_classes super(DenseNet, self).__init__() norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) # Stem deep_stem = 'deep' in stem_type # 3x3 deep stem num_init_features = growth_rate * 2 if aa_layer is None: stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) else: stem_pool = nn.Sequential(*[ nn.MaxPool2d(kernel_size=3, stride=1, padding=1), aa_layer(channels=num_init_features, stride=2)]) if deep_stem: stem_chs_1 = stem_chs_2 = growth_rate if 'tiered' in stem_type: stem_chs_1 = 3 * (growth_rate // 4) stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) self.features = nn.Sequential(OrderedDict([ ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), ('norm0', norm_layer(stem_chs_1)), ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), ('norm1', norm_layer(stem_chs_2)), ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), ('norm2', norm_layer(num_init_features)), ('pool0', stem_pool), ])) else: self.features = nn.Sequential(OrderedDict([ ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), ('norm0', norm_layer(num_init_features)), ('pool0', stem_pool), ])) self.feature_info = [ dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')] current_stride = 4 # DenseBlocks num_features = num_init_features for i, num_layers in enumerate(block_config): block = DenseBlock( num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, norm_layer=norm_layer, drop_rate=proj_drop_rate, grad_checkpointing=memory_efficient, ) module_name = f'denseblock{(i + 1)}' self.features.add_module(module_name, block) num_features = num_features + num_layers * growth_rate transition_aa_layer = None if aa_stem_only else aa_layer if i != len(block_config) - 1: self.feature_info += [ dict(num_chs=num_features, 
reduction=current_stride, module='features.' + module_name)] current_stride *= 2 trans = DenseTransition( num_input_features=num_features, num_output_features=num_features // 2, norm_layer=norm_layer, aa_layer=transition_aa_layer, ) self.features.add_module(f'transition{i + 1}', trans) num_features = num_features // 2 # Final batch norm self.features.add_module('norm5', norm_layer(num_features)) self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] self.num_features = num_features # Linear layer global_pool, classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, ) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.classifier = classifier # Official init from torch repo. for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^features\.conv[012]|features\.norm[012]|features\.pool[012]', blocks=r'^features\.(?:denseblock|transition)(\d+)' if coarse else [ (r'^features\.denseblock(\d+)\.denselayer(\d+)', None), (r'^features\.transition(\d+)', MATCH_PREV_GROUP) # FIXME combine with previous denselayer ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for b in self.features.modules(): if isinstance(b, DenseLayer): b.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): return self.features(x) def forward(self, x): x = self.forward_features(x) x = self.global_pool(x) x = self.head_drop(x) x = self.classifier(x) return x def _filter_torchvision_pretrained(state_dict): pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] return state_dict def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs): kwargs['growth_rate'] = growth_rate kwargs['block_config'] = block_config return build_model_with_cfg( DenseNet, variant, pretrained, feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=_filter_torchvision_pretrained, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'features.conv0', 'classifier': 'classifier', **kwargs, } default_cfgs = generate_default_cfgs({ 'densenet121.ra_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'densenetblur121d.ra_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'densenet264d.untrained': _cfg(), 'densenet121.tv_in1k': _cfg(hf_hub_id='timm/'), 'densenet169.tv_in1k': _cfg(hf_hub_id='timm/'), 'densenet201.tv_in1k': _cfg(hf_hub_id='timm/'), 'densenet161.tv_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def densenet121(pretrained=False, **kwargs) -> DenseNet: 
r"""Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet121', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, **kwargs) return model @register_model def densenetblur121d(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-121 w/ blur-pooling & 3-layer 3x3 stem `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenetblur121d', growth_rate=32, block_config=(6, 12, 24, 16), pretrained=pretrained, stem_type='deep', aa_layer=BlurPool2d, **kwargs) return model @register_model def densenet169(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-169 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet169', growth_rate=32, block_config=(6, 12, 32, 32), pretrained=pretrained, **kwargs) return model @register_model def densenet201(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-201 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet201', growth_rate=32, block_config=(6, 12, 48, 32), pretrained=pretrained, **kwargs) return model @register_model def densenet161(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-161 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet161', growth_rate=48, block_config=(6, 12, 36, 24), pretrained=pretrained, **kwargs) return model @register_model def densenet264d(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-264 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model = _create_densenet( 'densenet264d', growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep', pretrained=pretrained, **kwargs) return model
hf_public_repos/pytorch-image-models/timm/models/dla.py
""" Deep Layer Aggregation and DLA w/ Res2Net DLA original adapted from Official Pytorch impl at: https://github.com/ucbdrive/dla DLA Paper: `Deep Layer Aggregation` - https://arxiv.org/abs/1707.06484 Res2Net additions from: https://github.com/gasvn/Res2Net/ Res2Net Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 """ import math from typing import List, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['DLA'] class DlaBasic(nn.Module): """DLA Basic""" def __init__(self, inplanes, planes, stride=1, dilation=1, **_): super(DlaBasic, self).__init__() self.conv1 = nn.Conv2d( inplanes, planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, stride=1, padding=dilation, bias=False, dilation=dilation) self.bn2 = nn.BatchNorm2d(planes) self.stride = stride def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): if shortcut is None: shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out += shortcut out = self.relu(out) return out class DlaBottleneck(nn.Module): """DLA/DLA-X Bottleneck""" expansion = 2 def __init__(self, inplanes, outplanes, stride=1, dilation=1, cardinality=1, base_width=64): super(DlaBottleneck, self).__init__() self.stride = stride mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) mid_planes = mid_planes // self.expansion self.conv1 = nn.Conv2d(inplanes, mid_planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(mid_planes) self.conv2 = nn.Conv2d( mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, bias=False, dilation=dilation, groups=cardinality) self.bn2 = nn.BatchNorm2d(mid_planes) self.conv3 = nn.Conv2d(mid_planes, outplanes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(outplanes) self.relu = nn.ReLU(inplace=True) def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): if shortcut is None: shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) out += shortcut out = self.relu(out) return out class DlaBottle2neck(nn.Module): """ Res2Net/Res2NeXT DLA Bottleneck Adapted from https://github.com/gasvn/Res2Net/blob/master/dla.py """ expansion = 2 def __init__(self, inplanes, outplanes, stride=1, dilation=1, scale=4, cardinality=8, base_width=4): super(DlaBottle2neck, self).__init__() self.is_first = stride > 1 self.scale = scale mid_planes = int(math.floor(outplanes * (base_width / 64)) * cardinality) mid_planes = mid_planes // self.expansion self.width = mid_planes self.conv1 = nn.Conv2d(inplanes, mid_planes * scale, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(mid_planes * scale) num_scale_convs = max(1, scale - 1) convs = [] bns = [] for _ in range(num_scale_convs): convs.append(nn.Conv2d( mid_planes, mid_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False)) bns.append(nn.BatchNorm2d(mid_planes)) self.convs = nn.ModuleList(convs) 
self.bns = nn.ModuleList(bns) self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) if self.is_first else None self.conv3 = nn.Conv2d(mid_planes * scale, outplanes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(outplanes) self.relu = nn.ReLU(inplace=True) def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): if shortcut is None: shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) spx = torch.split(out, self.width, 1) spo = [] sp = spx[0] # redundant, for torchscript for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): if i == 0 or self.is_first: sp = spx[i] else: sp = sp + spx[i] sp = conv(sp) sp = bn(sp) sp = self.relu(sp) spo.append(sp) if self.scale > 1: if self.pool is not None: # self.is_first == True, None check for torchscript spo.append(self.pool(spx[-1])) else: spo.append(spx[-1]) out = torch.cat(spo, 1) out = self.conv3(out) out = self.bn3(out) out += shortcut out = self.relu(out) return out class DlaRoot(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, shortcut): super(DlaRoot, self).__init__() self.conv = nn.Conv2d( in_channels, out_channels, 1, stride=1, bias=False, padding=(kernel_size - 1) // 2) self.bn = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) self.shortcut = shortcut def forward(self, x_children: List[torch.Tensor]): x = self.conv(torch.cat(x_children, 1)) x = self.bn(x) if self.shortcut: x += x_children[0] x = self.relu(x) return x class DlaTree(nn.Module): def __init__( self, levels, block, in_channels, out_channels, stride=1, dilation=1, cardinality=1, base_width=64, level_root=False, root_dim=0, root_kernel_size=1, root_shortcut=False, ): super(DlaTree, self).__init__() if root_dim == 0: root_dim = 2 * out_channels if level_root: root_dim += in_channels self.downsample = nn.MaxPool2d(stride, stride=stride) if stride > 1 else nn.Identity() self.project = nn.Identity() cargs = dict(dilation=dilation, cardinality=cardinality, base_width=base_width) if levels == 1: self.tree1 = block(in_channels, out_channels, stride, **cargs) self.tree2 = block(out_channels, out_channels, 1, **cargs) if in_channels != out_channels: # NOTE the official impl/weights have project layers in levels > 1 case that are never # used, I've moved the project layer here to avoid wasted params but old checkpoints will # need strict=False while loading. 
self.project = nn.Sequential( nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False), nn.BatchNorm2d(out_channels)) self.root = DlaRoot(root_dim, out_channels, root_kernel_size, root_shortcut) else: cargs.update(dict(root_kernel_size=root_kernel_size, root_shortcut=root_shortcut)) self.tree1 = DlaTree( levels - 1, block, in_channels, out_channels, stride, root_dim=0, **cargs, ) self.tree2 = DlaTree( levels - 1, block, out_channels, out_channels, root_dim=root_dim + out_channels, **cargs, ) self.root = None self.level_root = level_root self.root_dim = root_dim self.levels = levels def forward(self, x, shortcut: Optional[torch.Tensor] = None, children: Optional[List[torch.Tensor]] = None): if children is None: children = [] bottom = self.downsample(x) shortcut = self.project(bottom) if self.level_root: children.append(bottom) x1 = self.tree1(x, shortcut) if self.root is not None: # levels == 1 x2 = self.tree2(x1) x = self.root([x2, x1] + children) else: children.append(x1) x = self.tree2(x1, None, children) return x class DLA(nn.Module): def __init__( self, levels, channels, output_stride=32, num_classes=1000, in_chans=3, global_pool='avg', cardinality=1, base_width=64, block=DlaBottle2neck, shortcut_root=False, drop_rate=0.0, ): super(DLA, self).__init__() self.channels = channels self.num_classes = num_classes self.cardinality = cardinality self.base_width = base_width assert output_stride == 32 # FIXME support dilation self.base_layer = nn.Sequential( nn.Conv2d(in_chans, channels[0], kernel_size=7, stride=1, padding=3, bias=False), nn.BatchNorm2d(channels[0]), nn.ReLU(inplace=True), ) self.level0 = self._make_conv_level(channels[0], channels[0], levels[0]) self.level1 = self._make_conv_level(channels[0], channels[1], levels[1], stride=2) cargs = dict(cardinality=cardinality, base_width=base_width, root_shortcut=shortcut_root) self.level2 = DlaTree(levels[2], block, channels[1], channels[2], 2, level_root=False, **cargs) self.level3 = DlaTree(levels[3], block, channels[2], channels[3], 2, level_root=True, **cargs) self.level4 = DlaTree(levels[4], block, channels[3], channels[4], 2, level_root=True, **cargs) self.level5 = DlaTree(levels[5], block, channels[4], channels[5], 2, level_root=True, **cargs) self.feature_info = [ dict(num_chs=channels[0], reduction=1, module='level0'), # rare to have a meaningful stride 1 level dict(num_chs=channels[1], reduction=2, module='level1'), dict(num_chs=channels[2], reduction=4, module='level2'), dict(num_chs=channels[3], reduction=8, module='level3'), dict(num_chs=channels[4], reduction=16, module='level4'), dict(num_chs=channels[5], reduction=32, module='level5'), ] self.num_features = channels[-1] self.global_pool, self.head_drop, self.fc = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, use_conv=True, drop_rate=drop_rate, ) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1): modules = [] for i in range(convs): modules.extend([ nn.Conv2d( inplanes, planes, kernel_size=3, stride=stride if i == 0 else 1, padding=dilation, bias=False, dilation=dilation), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)]) inplanes = planes return nn.Sequential(*modules) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^base_layer', blocks=r'^level(\d+)' if coarse else [ # an unusual arch, this achieves somewhat more granularity without getting super messy (r'^level(\d+)\.tree(\d+)', None), (r'^level(\d+)\.root', (2,)), (r'^level(\d+)', (1,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.fc = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() def forward_features(self, x): x = self.base_layer(x) x = self.level0(x) x = self.level1(x) x = self.level2(x) x = self.level3(x) x = self.level4(x) x = self.level5(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) if pre_logits: return self.flatten(x) x = self.fc(x) return self.flatten(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_dla(variant, pretrained=False, **kwargs): return build_model_with_cfg( DLA, variant, pretrained, pretrained_strict=False, feature_cfg=dict(out_indices=(1, 2, 3, 4, 5)), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'base_layer.0', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'dla34.in1k': _cfg(hf_hub_id='timm/'), 'dla46_c.in1k': _cfg(hf_hub_id='timm/'), 'dla46x_c.in1k': _cfg(hf_hub_id='timm/'), 'dla60x_c.in1k': _cfg(hf_hub_id='timm/'), 'dla60.in1k': _cfg(hf_hub_id='timm/'), 'dla60x.in1k': _cfg(hf_hub_id='timm/'), 'dla102.in1k': _cfg(hf_hub_id='timm/'), 'dla102x.in1k': _cfg(hf_hub_id='timm/'), 'dla102x2.in1k': _cfg(hf_hub_id='timm/'), 'dla169.in1k': _cfg(hf_hub_id='timm/'), 'dla60_res2net.in1k': _cfg(hf_hub_id='timm/'), 'dla60_res2next.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def dla60_res2net(pretrained=False, **kwargs) -> DLA: model_args = dict( levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), block=DlaBottle2neck, cardinality=1, base_width=28) return _create_dla('dla60_res2net', pretrained, **dict(model_args, **kwargs)) @register_model def dla60_res2next(pretrained=False,**kwargs): model_args = dict( levels=(1, 1, 1, 2, 3, 1), channels=(16, 32, 128, 256, 512, 1024), block=DlaBottle2neck, cardinality=8, base_width=4) return _create_dla('dla60_res2next', pretrained, **dict(model_args, **kwargs)) @register_model def dla34(pretrained=False, **kwargs) -> DLA: # DLA-34 model_args = dict( levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 128, 256, 512], block=DlaBasic) return _create_dla('dla34', pretrained, **dict(model_args, **kwargs)) @register_model def dla46_c(pretrained=False, **kwargs) -> DLA: # 
DLA-46-C model_args = dict( levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck) return _create_dla('dla46_c', pretrained, **dict(model_args, **kwargs)) @register_model def dla46x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-46-C model_args = dict( levels=[1, 1, 1, 2, 2, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck, cardinality=32, base_width=4) return _create_dla('dla46x_c', pretrained, **dict(model_args, **kwargs)) @register_model def dla60x_c(pretrained=False, **kwargs) -> DLA: # DLA-X-60-C model_args = dict( levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 64, 64, 128, 256], block=DlaBottleneck, cardinality=32, base_width=4) return _create_dla('dla60x_c', pretrained, **dict(model_args, **kwargs)) @register_model def dla60(pretrained=False, **kwargs) -> DLA: # DLA-60 model_args = dict( levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck) return _create_dla('dla60', pretrained, **dict(model_args, **kwargs)) @register_model def dla60x(pretrained=False, **kwargs) -> DLA: # DLA-X-60 model_args = dict( levels=[1, 1, 1, 2, 3, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=32, base_width=4) return _create_dla('dla60x', pretrained, **dict(model_args, **kwargs)) @register_model def dla102(pretrained=False, **kwargs) -> DLA: # DLA-102 model_args = dict( levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, shortcut_root=True) return _create_dla('dla102', pretrained, **dict(model_args, **kwargs)) @register_model def dla102x(pretrained=False, **kwargs) -> DLA: # DLA-X-102 model_args = dict( levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=32, base_width=4, shortcut_root=True) return _create_dla('dla102x', pretrained, **dict(model_args, **kwargs)) @register_model def dla102x2(pretrained=False, **kwargs) -> DLA: # DLA-X-102 64 model_args = dict( levels=[1, 1, 1, 3, 4, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, cardinality=64, base_width=4, shortcut_root=True) return _create_dla('dla102x2', pretrained, **dict(model_args, **kwargs)) @register_model def dla169(pretrained=False, **kwargs) -> DLA: # DLA-169 model_args = dict( levels=[1, 1, 2, 3, 5, 1], channels=[16, 32, 128, 256, 512, 1024], block=DlaBottleneck, shortcut_root=True) return _create_dla('dla169', pretrained, **dict(model_args, **kwargs))
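# ---------------------------------------------------------------------------
# Editor's note: a feature-extraction sketch (not part of the original file).
# _create_dla above passes feature_cfg with out_indices=(1, 2, 3, 4, 5), so
# features_only=True returns the five strided level outputs. Assumes `timm` is
# importable; random weights.
import torch
import timm

dla_backbone = timm.create_model('dla34', pretrained=False, features_only=True)
print(dla_backbone.feature_info.channels())     # channels per returned level

img = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    level_outputs = dla_backbone(img)
print([tuple(f.shape) for f in level_outputs])  # strides 2, 4, 8, 16, 32
# ---------------------------------------------------------------------------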
hf_public_repos/pytorch-image-models/timm/models/dpn.py
""" PyTorch implementation of DualPathNetworks Based on original MXNet implementation https://github.com/cypw/DPNs with many ideas from another PyTorch implementation https://github.com/oyam/pytorch-DPNs. This implementation is compatible with the pretrained weights from cypw's MXNet implementation. Hacked together by / Copyright 2020 Ross Wightman """ from collections import OrderedDict from functools import partial from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DPN_MEAN, IMAGENET_DPN_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import BatchNormAct2d, ConvNormAct, create_conv2d, create_classifier, get_norm_act_layer from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['DPN'] class CatBnAct(nn.Module): def __init__(self, in_chs, norm_layer=BatchNormAct2d): super(CatBnAct, self).__init__() self.bn = norm_layer(in_chs, eps=0.001) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor, torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (torch.Tensor) pass def forward(self, x): if isinstance(x, tuple): x = torch.cat(x, dim=1) return self.bn(x) class BnActConv2d(nn.Module): def __init__(self, in_chs, out_chs, kernel_size, stride, groups=1, norm_layer=BatchNormAct2d): super(BnActConv2d, self).__init__() self.bn = norm_layer(in_chs, eps=0.001) self.conv = create_conv2d(in_chs, out_chs, kernel_size, stride=stride, groups=groups) def forward(self, x): return self.conv(self.bn(x)) class DualPathBlock(nn.Module): def __init__( self, in_chs, num_1x1_a, num_3x3_b, num_1x1_c, inc, groups, block_type='normal', b=False, ): super(DualPathBlock, self).__init__() self.num_1x1_c = num_1x1_c self.inc = inc self.b = b if block_type == 'proj': self.key_stride = 1 self.has_proj = True elif block_type == 'down': self.key_stride = 2 self.has_proj = True else: assert block_type == 'normal' self.key_stride = 1 self.has_proj = False self.c1x1_w_s1 = None self.c1x1_w_s2 = None if self.has_proj: # Using different member names here to allow easier parameter key matching for conversion if self.key_stride == 2: self.c1x1_w_s2 = BnActConv2d( in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=2) else: self.c1x1_w_s1 = BnActConv2d( in_chs=in_chs, out_chs=num_1x1_c + 2 * inc, kernel_size=1, stride=1) self.c1x1_a = BnActConv2d(in_chs=in_chs, out_chs=num_1x1_a, kernel_size=1, stride=1) self.c3x3_b = BnActConv2d( in_chs=num_1x1_a, out_chs=num_3x3_b, kernel_size=3, stride=self.key_stride, groups=groups) if b: self.c1x1_c = CatBnAct(in_chs=num_3x3_b) self.c1x1_c1 = create_conv2d(num_3x3_b, num_1x1_c, kernel_size=1) self.c1x1_c2 = create_conv2d(num_3x3_b, inc, kernel_size=1) else: self.c1x1_c = BnActConv2d(in_chs=num_3x3_b, out_chs=num_1x1_c + inc, kernel_size=1, stride=1) self.c1x1_c1 = None self.c1x1_c2 = None @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor] pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] pass def forward(self, x) -> Tuple[torch.Tensor, torch.Tensor]: if isinstance(x, tuple): x_in = torch.cat(x, dim=1) else: x_in = x if self.c1x1_w_s1 is None and self.c1x1_w_s2 is None: # self.has_proj == False, torchscript requires condition on module == None x_s1 = x[0] x_s2 = 
x[1] else: # self.has_proj == True if self.c1x1_w_s1 is not None: # self.key_stride = 1 x_s = self.c1x1_w_s1(x_in) else: # self.key_stride = 2 x_s = self.c1x1_w_s2(x_in) x_s1 = x_s[:, :self.num_1x1_c, :, :] x_s2 = x_s[:, self.num_1x1_c:, :, :] x_in = self.c1x1_a(x_in) x_in = self.c3x3_b(x_in) x_in = self.c1x1_c(x_in) if self.c1x1_c1 is not None: # self.b == True, using None check for torchscript compat out1 = self.c1x1_c1(x_in) out2 = self.c1x1_c2(x_in) else: out1 = x_in[:, :self.num_1x1_c, :, :] out2 = x_in[:, self.num_1x1_c:, :, :] resid = x_s1 + out1 dense = torch.cat([x_s2, out2], dim=1) return resid, dense class DPN(nn.Module): def __init__( self, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128), k_r=96, groups=32, num_classes=1000, in_chans=3, output_stride=32, global_pool='avg', small=False, num_init_features=64, b=False, drop_rate=0., norm_layer='batchnorm2d', act_layer='relu', fc_act_layer='elu', ): super(DPN, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.b = b assert output_stride == 32 # FIXME look into dilation support norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=act_layer), eps=.001) fc_norm_layer = partial(get_norm_act_layer(norm_layer, act_layer=fc_act_layer), eps=.001, inplace=False) bw_factor = 1 if small else 4 blocks = OrderedDict() # conv1 blocks['conv1_1'] = ConvNormAct( in_chans, num_init_features, kernel_size=3 if small else 7, stride=2, norm_layer=norm_layer) blocks['conv1_pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.feature_info = [dict(num_chs=num_init_features, reduction=2, module='features.conv1_1')] # conv2 bw = 64 * bw_factor inc = inc_sec[0] r = (k_r * bw) // (64 * bw_factor) blocks['conv2_1'] = DualPathBlock(num_init_features, r, r, bw, inc, groups, 'proj', b) in_chs = bw + 3 * inc for i in range(2, k_sec[0] + 1): blocks['conv2_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=4, module=f'features.conv2_{k_sec[0]}')] # conv3 bw = 128 * bw_factor inc = inc_sec[1] r = (k_r * bw) // (64 * bw_factor) blocks['conv3_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[1] + 1): blocks['conv3_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=8, module=f'features.conv3_{k_sec[1]}')] # conv4 bw = 256 * bw_factor inc = inc_sec[2] r = (k_r * bw) // (64 * bw_factor) blocks['conv4_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[2] + 1): blocks['conv4_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=16, module=f'features.conv4_{k_sec[2]}')] # conv5 bw = 512 * bw_factor inc = inc_sec[3] r = (k_r * bw) // (64 * bw_factor) blocks['conv5_1'] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'down', b) in_chs = bw + 3 * inc for i in range(2, k_sec[3] + 1): blocks['conv5_' + str(i)] = DualPathBlock(in_chs, r, r, bw, inc, groups, 'normal', b) in_chs += inc self.feature_info += [dict(num_chs=in_chs, reduction=32, module=f'features.conv5_{k_sec[3]}')] blocks['conv5_bn_ac'] = CatBnAct(in_chs, norm_layer=fc_norm_layer) self.num_features = in_chs self.features = nn.Sequential(blocks) # Using 1x1 conv for the FC layer to allow the extra pooling scheme self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, 
pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^features\.conv1', blocks=[ (r'^features\.conv(\d+)' if coarse else r'^features\.conv(\d+)_(\d+)', None), (r'^features\.conv5_bn_ac', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, use_conv=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() def forward_features(self, x): return self.features(x) def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) if pre_logits: return self.flatten(x) x = self.classifier(x) return self.flatten(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_dpn(variant, pretrained=False, **kwargs): return build_model_with_cfg( DPN, variant, pretrained, feature_cfg=dict(feature_concat=True, flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DPN_MEAN, 'std': IMAGENET_DPN_STD, 'first_conv': 'features.conv1_1.conv', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'dpn48b.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'dpn68.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn68b.ra_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'dpn68b.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn92.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn98.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn131.mx_in1k': _cfg(hf_hub_id='timm/'), 'dpn107.mx_in1k': _cfg(hf_hub_id='timm/') }) @register_model def dpn48b(pretrained=False, **kwargs) -> DPN: model_kwargs = dict( small=True, num_init_features=10, k_r=128, groups=32, b=True, k_sec=(3, 4, 6, 3), inc_sec=(16, 32, 32, 64), act_layer='silu') return _create_dpn('dpn48b', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def dpn68(pretrained=False, **kwargs) -> DPN: model_kwargs = dict( small=True, num_init_features=10, k_r=128, groups=32, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) return _create_dpn('dpn68', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def dpn68b(pretrained=False, **kwargs) -> DPN: model_kwargs = dict( small=True, num_init_features=10, k_r=128, groups=32, b=True, k_sec=(3, 4, 12, 3), inc_sec=(16, 32, 32, 64)) return _create_dpn('dpn68b', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def dpn92(pretrained=False, **kwargs) -> DPN: model_kwargs = dict( num_init_features=64, k_r=96, groups=32, k_sec=(3, 4, 20, 3), inc_sec=(16, 32, 24, 128)) return _create_dpn('dpn92', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def dpn98(pretrained=False, **kwargs) -> DPN: model_kwargs = dict( num_init_features=96, k_r=160, groups=40, k_sec=(3, 6, 20, 3), inc_sec=(16, 32, 32, 128)) return _create_dpn('dpn98', pretrained=pretrained, 
**dict(model_kwargs, **kwargs)) @register_model def dpn131(pretrained=False, **kwargs) -> DPN: model_kwargs = dict( num_init_features=128, k_r=160, groups=40, k_sec=(4, 8, 28, 3), inc_sec=(16, 32, 32, 128)) return _create_dpn('dpn131', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def dpn107(pretrained=False, **kwargs) -> DPN: model_kwargs = dict( num_init_features=128, k_r=200, groups=50, k_sec=(4, 8, 20, 3), inc_sec=(20, 64, 64, 128)) return _create_dpn('dpn107', pretrained=pretrained, **dict(model_kwargs, **kwargs))
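# ---------------------------------------------------------------------------
# Editor's note: a basic inference sketch (not part of the original file) for the
# DPN entrypoints above. forward_head(..., pre_logits=True) returns the pooled,
# flattened dual-path features before the 1x1-conv classifier. Assumes `timm` is
# importable; random weights.
import torch
import timm

dpn = timm.create_model('dpn68', pretrained=False)
dpn.eval()
img = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = dpn(img)                                               # (1, 1000)
    pooled = dpn.forward_head(dpn.forward_features(img), pre_logits=True)
print(logits.shape, pooled.shape)
# ---------------------------------------------------------------------------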
hf_public_repos/pytorch-image-models/timm/models/edgenext.py
""" EdgeNeXt Paper: `EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications` - https://arxiv.org/abs/2206.10589 Original code and weights from https://github.com/mmaaz60/EdgeNeXt Modifications and additions for timm by / Copyright 2022, Ross Wightman """ import math from collections import OrderedDict from functools import partial from typing import Tuple import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_tf_, DropPath, LayerNorm2d, Mlp, SelectAdaptivePool2d, create_conv2d, \ use_fused_attn from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import named_apply, checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['EdgeNeXt'] # model_registry will add each entrypoint fn to this @register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method class PositionalEncodingFourier(nn.Module): def __init__(self, hidden_dim=32, dim=768, temperature=10000): super().__init__() self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) self.scale = 2 * math.pi self.temperature = temperature self.hidden_dim = hidden_dim self.dim = dim def forward(self, shape: Tuple[int, int, int]): device = self.token_projection.weight.device dtype = self.token_projection.weight.dtype inv_mask = ~torch.zeros(shape).to(device=device, dtype=torch.bool) y_embed = inv_mask.cumsum(1, dtype=dtype) x_embed = inv_mask.cumsum(2, dtype=dtype) eps = 1e-6 y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.hidden_dim, dtype=dtype, device=device) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack( (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack( (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) pos = self.token_projection(pos) return pos class ConvBlock(nn.Module): def __init__( self, dim, dim_out=None, kernel_size=7, stride=1, conv_bias=True, expand_ratio=4, ls_init_value=1e-6, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_path=0., ): super().__init__() dim_out = dim_out or dim self.shortcut_after_dw = stride > 1 or dim != dim_out self.conv_dw = create_conv2d( dim, dim_out, kernel_size=kernel_size, stride=stride, depthwise=True, bias=conv_bias) self.norm = norm_layer(dim_out) self.mlp = Mlp(dim_out, int(expand_ratio * dim_out), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim_out)) if ls_init_value > 0 else None self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x x = self.conv_dw(x) if self.shortcut_after_dw: shortcut = x x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) x = shortcut + self.drop_path(x) return x class CrossCovarianceAttn(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0. 
): super().__init__() self.num_heads = num_heads self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 4, 1) q, k, v = qkv.unbind(0) # NOTE, this is NOT spatial attn, q, k, v are B, num_heads, C, L --> C x C attn map attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) * self.temperature attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v) x = x.permute(0, 3, 1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @torch.jit.ignore def no_weight_decay(self): return {'temperature'} class SplitTransposeBlock(nn.Module): def __init__( self, dim, num_scales=1, num_heads=8, expand_ratio=4, use_pos_emb=True, conv_bias=True, qkv_bias=True, ls_init_value=1e-6, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_path=0., attn_drop=0., proj_drop=0. ): super().__init__() width = max(int(math.ceil(dim / num_scales)), int(math.floor(dim // num_scales))) self.width = width self.num_scales = max(1, num_scales - 1) convs = [] for i in range(self.num_scales): convs.append(create_conv2d(width, width, kernel_size=3, depthwise=True, bias=conv_bias)) self.convs = nn.ModuleList(convs) self.pos_embd = None if use_pos_emb: self.pos_embd = PositionalEncodingFourier(dim=dim) self.norm_xca = norm_layer(dim) self.gamma_xca = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.xca = CrossCovarianceAttn( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.norm = norm_layer(dim, eps=1e-6) self.mlp = Mlp(dim, int(expand_ratio * dim), act_layer=act_layer) self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): shortcut = x # scales code re-written for torchscript as per my res2net fixes -rw # NOTE torch.split(x, self.width, 1) causing issues with ONNX export spx = x.chunk(len(self.convs) + 1, dim=1) spo = [] sp = spx[0] for i, conv in enumerate(self.convs): if i > 0: sp = sp + spx[i] sp = conv(sp) spo.append(sp) spo.append(spx[-1]) x = torch.cat(spo, 1) # XCA B, C, H, W = x.shape x = x.reshape(B, C, H * W).permute(0, 2, 1) if self.pos_embd is not None: pos_encoding = self.pos_embd((B, H, W)).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = x + self.drop_path(self.gamma_xca * self.xca(self.norm_xca(x))) x = x.reshape(B, H, W, C) # Inverted Bottleneck x = self.norm(x) x = self.mlp(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) x = shortcut + self.drop_path(x) return x class EdgeNeXtStage(nn.Module): def __init__( self, in_chs, out_chs, stride=2, depth=2, num_global_blocks=1, num_heads=4, scales=2, kernel_size=7, expand_ratio=4, use_pos_emb=False, downsample_block=False, conv_bias=True, ls_init_value=1.0, drop_path_rates=None, norm_layer=LayerNorm2d, norm_layer_cl=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU ): super().__init__() self.grad_checkpointing = False if downsample_block or stride == 1: self.downsample = nn.Identity() else: self.downsample = nn.Sequential( norm_layer(in_chs), nn.Conv2d(in_chs, out_chs, kernel_size=2, stride=2, bias=conv_bias) ) in_chs = out_chs stage_blocks = [] for i in range(depth): if i < depth - num_global_blocks: stage_blocks.append( ConvBlock( dim=in_chs, dim_out=out_chs, stride=stride if downsample_block and i == 0 else 1, conv_bias=conv_bias, kernel_size=kernel_size, expand_ratio=expand_ratio, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer, ) ) else: stage_blocks.append( SplitTransposeBlock( dim=in_chs, num_scales=scales, num_heads=num_heads, expand_ratio=expand_ratio, use_pos_emb=use_pos_emb, conv_bias=conv_bias, ls_init_value=ls_init_value, drop_path=drop_path_rates[i], norm_layer=norm_layer_cl, act_layer=act_layer, ) ) in_chs = out_chs self.blocks = nn.Sequential(*stage_blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EdgeNeXt(nn.Module): def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', dims=(24, 48, 88, 168), depths=(3, 3, 9, 3), global_block_counts=(0, 1, 1, 1), kernel_sizes=(3, 5, 7, 9), heads=(8, 8, 8, 8), d2_scales=(2, 2, 3, 4), use_pos_emb=(False, True, False, False), ls_init_value=1e-6, head_init_scale=1., expand_ratio=4, downsample_block=False, conv_bias=True, stem_type='patch', head_norm_first=False, act_layer=nn.GELU, drop_path_rate=0., drop_rate=0., ): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.drop_rate = drop_rate norm_layer = partial(LayerNorm2d, eps=1e-6) norm_layer_cl = partial(nn.LayerNorm, eps=1e-6) self.feature_info = [] assert stem_type in ('patch', 'overlap') if stem_type == 'patch': self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4, bias=conv_bias), norm_layer(dims[0]), ) else: self.stem = nn.Sequential( nn.Conv2d(in_chans, dims[0], kernel_size=9, stride=4, padding=9 // 2, bias=conv_bias), norm_layer(dims[0]), ) curr_stride = 4 stages = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, 
sum(depths)).split(depths)] in_chs = dims[0] for i in range(4): stride = 2 if curr_stride == 2 or i > 0 else 1 # FIXME support dilation / output_stride curr_stride *= stride stages.append(EdgeNeXtStage( in_chs=in_chs, out_chs=dims[i], stride=stride, depth=depths[i], num_global_blocks=global_block_counts[i], num_heads=heads[i], drop_path_rates=dp_rates[i], scales=d2_scales[i], expand_ratio=expand_ratio, kernel_size=kernel_sizes[i], use_pos_emb=use_pos_emb[i], ls_init_value=ls_init_value, downsample_block=downsample_block, conv_bias=conv_bias, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, act_layer=act_layer, )) # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2 in_chs = dims[i] self.feature_info += [dict(num_chs=in_chs, reduction=curr_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.num_features = dims[-1] self.norm_pre = norm_layer(self.num_features) if head_norm_first else nn.Identity() self.head = nn.Sequential(OrderedDict([ ('global_pool', SelectAdaptivePool2d(pool_type=global_pool)), ('norm', nn.Identity() if head_norm_first else norm_layer(self.num_features)), ('flatten', nn.Flatten(1) if global_pool else nn.Identity()), ('drop', nn.Dropout(self.drop_rate)), ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())])) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.downsample', (0,)), # blocks (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm_pre', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes=0, global_pool=None): if global_pool is not None: self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity() self.head.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm_pre(x) return x def forward_head(self, x, pre_logits: bool = False): # NOTE nn.Sequential in head broken down since can't call head[:-1](x) in torchscript :( x = self.head.global_pool(x) x = self.head.norm(x) x = self.head.flatten(x) x = self.head.drop(x) return x if pre_logits else self.head.fc(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_tf_(module.weight, std=.02) nn.init.zeros_(module.bias) if name and 'head.' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def checkpoint_filter_fn(state_dict, model): """ Remap FB checkpoints -> timm """ if 'head.norm.weight' in state_dict or 'norm_pre.weight' in state_dict: return state_dict # non-FB checkpoint # models were released as train checkpoints... 
:/ if 'model_ema' in state_dict: state_dict = state_dict['model_ema'] elif 'model' in state_dict: state_dict = state_dict['model'] elif 'state_dict' in state_dict: state_dict = state_dict['state_dict'] out_dict = {} import re for k, v in state_dict.items(): k = k.replace('downsample_layers.0.', 'stem.') k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k) k = k.replace('dwconv', 'conv_dw') k = k.replace('pwconv', 'mlp.fc') k = k.replace('head.', 'head.fc.') if k.startswith('norm.'): k = k.replace('norm', 'head.norm') if v.ndim == 2 and 'head' not in k: model_shape = model.state_dict()[k].shape v = v.reshape(model_shape) out_dict[k] = v return out_dict def _create_edgenext(variant, pretrained=False, **kwargs): model = build_model_with_cfg( EdgeNeXt, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'edgenext_xx_small.in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_x_small.in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=1.0), 'edgenext_small.usi_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_base.usi_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_base.in21k_ft_in1k': _cfg( # USI weights hf_hub_id='timm/', crop_pct=0.95, test_input_size=(3, 320, 320), test_crop_pct=1.0, ), 'edgenext_small_rw.sw_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 320, 320), test_crop_pct=1.0, ), }) @register_model def edgenext_xx_small(pretrained=False, **kwargs) -> EdgeNeXt: # 1.33M & 260.58M @ 256 resolution # 71.23% Top-1 accuracy # No AA, Color Jitter=0.4, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=51.66 versus 47.67 for MobileViT_XXS # For A100: FPS @ BS=1: 212.13 & @ BS=256: 7042.06 versus FPS @ BS=1: 96.68 & @ BS=256: 4624.71 for MobileViT_XXS model_kwargs = dict(depths=(2, 2, 6, 2), dims=(24, 48, 88, 168), heads=(4, 4, 4, 4), **kwargs) return _create_edgenext('edgenext_xx_small', pretrained=pretrained, **model_kwargs) @register_model def edgenext_x_small(pretrained=False, **kwargs) -> EdgeNeXt: # 2.34M & 538.0M @ 256 resolution # 75.00% Top-1 accuracy # No AA, No Mixup & Cutmix, DropPath=0.0, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=31.61 versus 28.49 for MobileViT_XS # For A100: FPS @ BS=1: 179.55 & @ BS=256: 4404.95 versus FPS @ BS=1: 94.55 & @ BS=256: 2361.53 for MobileViT_XS model_kwargs = dict(depths=(3, 3, 9, 3), dims=(32, 64, 100, 192), heads=(4, 4, 4, 4), **kwargs) return _create_edgenext('edgenext_x_small', pretrained=pretrained, **model_kwargs) @register_model def edgenext_small(pretrained=False, **kwargs) -> EdgeNeXt: # 5.59M & 1260.59M @ 256 resolution # 79.43% Top-1 accuracy # AA=True, No Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=20.47 versus 18.86 for MobileViT_S # For A100: FPS @ BS=1: 172.33 & @ BS=256: 3010.25 versus FPS @ BS=1: 93.84 & @ BS=256: 1785.92 
for MobileViT_S model_kwargs = dict(depths=(3, 3, 9, 3), dims=(48, 96, 160, 304), **kwargs) return _create_edgenext('edgenext_small', pretrained=pretrained, **model_kwargs) @register_model def edgenext_base(pretrained=False, **kwargs) -> EdgeNeXt: # 18.51M & 3840.93M @ 256 resolution # 82.5% (normal) 83.7% (USI) Top-1 accuracy # AA=True, Mixup & Cutmix, DropPath=0.1, BS=4096, lr=0.006, multi-scale-sampler # Jetson FPS=xx.xx versus xx.xx for MobileViT_S # For A100: FPS @ BS=1: xxx.xx & @ BS=256: xxxx.xx model_kwargs = dict(depths=[3, 3, 9, 3], dims=[80, 160, 288, 584], **kwargs) return _create_edgenext('edgenext_base', pretrained=pretrained, **model_kwargs) @register_model def edgenext_small_rw(pretrained=False, **kwargs) -> EdgeNeXt: model_kwargs = dict( depths=(3, 3, 9, 3), dims=(48, 96, 192, 384), downsample_block=True, conv_bias=False, stem_type='overlap', **kwargs) return _create_edgenext('edgenext_small_rw', pretrained=pretrained, **model_kwargs)
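# Example usage (a minimal sketch, assuming `timm` is installed and the entrypoints
# above are registered; `features_only=True` relies on the feature_cfg wired up in
# _create_edgenext to return the four stage outputs):
#
#   import torch, timm
#   model = timm.create_model('edgenext_small', pretrained=False).eval()
#   logits = model(torch.randn(1, 3, 256, 256))    # -> (1, 1000)
#   backbone = timm.create_model('edgenext_small', features_only=True)
#   fmaps = backbone(torch.randn(1, 3, 256, 256))  # 4 maps at strides 4, 8, 16, 32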
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/efficientformer.py
""" EfficientFormer @article{li2022efficientformer, title={EfficientFormer: Vision Transformers at MobileNet Speed}, author={Li, Yanyu and Yuan, Geng and Wen, Yang and Hu, Eric and Evangelidis, Georgios and Tulyakov, Sergey and Wang, Yanzhi and Ren, Jian}, journal={arXiv preprint arXiv:2206.01191}, year={2022} } Based on Apache 2.0 licensed code at https://github.com/snap-research/EfficientFormer, Copyright (c) 2022 Snap Inc. Modifications and timm support by / Copyright 2022, Ross Wightman """ from typing import Dict import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, to_2tuple, Mlp from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['EfficientFormer'] # model_registry will add each entrypoint fn to this EfficientFormer_width = { 'l1': (48, 96, 224, 448), 'l3': (64, 128, 320, 512), 'l7': (96, 192, 384, 768), } EfficientFormer_depth = { 'l1': (3, 2, 6, 4), 'l3': (4, 4, 12, 6), 'l7': (6, 6, 18, 8), } class Attention(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, dim=384, key_dim=32, num_heads=8, attn_ratio=4, resolution=7 ): super().__init__() self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim self.key_attn_dim = key_dim * num_heads self.val_dim = int(attn_ratio * key_dim) self.val_attn_dim = self.val_dim * num_heads self.attn_ratio = attn_ratio self.qkv = nn.Linear(dim, self.key_attn_dim * 2 + self.val_attn_dim) self.proj = nn.Linear(self.val_attn_dim, dim) resolution = to_2tuple(resolution) pos = torch.stack(torch.meshgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1] self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) self.register_buffer('attention_bias_idxs', rel_pos) self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): # x (B,N,C) B, N, C = x.shape qkv = self.qkv(x) qkv = qkv.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) q, k, v = qkv.split([self.key_dim, self.key_dim, self.val_dim], dim=3) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim) x = self.proj(x) return x class Stem4(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): super().__init__() self.stride = 4 self.add_module('conv1', nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1)) self.add_module('norm1', norm_layer(out_chs // 2)) self.add_module('act1', act_layer()) self.add_module('conv2', nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1)) self.add_module('norm2', 
norm_layer(out_chs)) self.add_module('act2', act_layer()) class Downsample(nn.Module): """ Downsampling via strided conv w/ norm Input: tensor in shape [B, C, H, W] Output: tensor in shape [B, C, H/stride, W/stride] """ def __init__(self, in_chs, out_chs, kernel_size=3, stride=2, padding=None, norm_layer=nn.BatchNorm2d): super().__init__() if padding is None: padding = kernel_size // 2 self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding) self.norm = norm_layer(out_chs) def forward(self, x): x = self.conv(x) x = self.norm(x) return x class Flat(nn.Module): def __init__(self, ): super().__init__() def forward(self, x): x = x.flatten(2).transpose(1, 2) return x class Pooling(nn.Module): """ Implementation of pooling for PoolFormer --pool_size: pooling size """ def __init__(self, pool_size=3): super().__init__() self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) def forward(self, x): return self.pool(x) - x class ConvMlpWithNorm(nn.Module): """ Implementation of MLP with 1*1 convolutions. Input: tensor with shape [B, C, H, W] """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, drop=0. ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Conv2d(in_features, hidden_features, 1) self.norm1 = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.act = act_layer() self.fc2 = nn.Conv2d(hidden_features, out_features, 1) self.norm2 = norm_layer(out_features) if norm_layer is not None else nn.Identity() self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.norm1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.norm2(x) x = self.drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class MetaBlock1d(nn.Module): def __init__( self, dim, mlp_ratio=4., act_layer=nn.GELU, norm_layer=nn.LayerNorm, proj_drop=0., drop_path=0., layer_scale_init_value=1e-5 ): super().__init__() self.norm1 = norm_layer(dim) self.token_mixer = Attention(dim) self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.ls1 = LayerScale(dim, layer_scale_init_value) self.ls2 = LayerScale(dim, layer_scale_init_value) def forward(self, x): x = x + self.drop_path(self.ls1(self.token_mixer(self.norm1(x)))) x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x)))) return x class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class MetaBlock2d(nn.Module): def __init__( self, dim, pool_size=3, mlp_ratio=4., act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, proj_drop=0., drop_path=0., layer_scale_init_value=1e-5 ): super().__init__() self.token_mixer = Pooling(pool_size=pool_size) self.ls1 = LayerScale2d(dim, layer_scale_init_value) self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.mlp = ConvMlpWithNorm( dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer, drop=proj_drop, ) self.ls2 = LayerScale2d(dim, layer_scale_init_value) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.ls1(self.token_mixer(x))) x = x + self.drop_path2(self.ls2(self.mlp(x))) return x class EfficientFormerStage(nn.Module): def __init__( self, dim, dim_out, depth, downsample=True, num_vit=1, pool_size=3, mlp_ratio=4., act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, norm_layer_cl=nn.LayerNorm, proj_drop=.0, drop_path=0., layer_scale_init_value=1e-5, ): super().__init__() self.grad_checkpointing = False if downsample: self.downsample = Downsample(in_chs=dim, out_chs=dim_out, norm_layer=norm_layer) dim = dim_out else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] if num_vit and num_vit >= depth: blocks.append(Flat()) for block_idx in range(depth): remain_idx = depth - block_idx - 1 if num_vit and num_vit > remain_idx: blocks.append( MetaBlock1d( dim, mlp_ratio=mlp_ratio, act_layer=act_layer, norm_layer=norm_layer_cl, proj_drop=proj_drop, drop_path=drop_path[block_idx], layer_scale_init_value=layer_scale_init_value, )) else: blocks.append( MetaBlock2d( dim, pool_size=pool_size, mlp_ratio=mlp_ratio, act_layer=act_layer, norm_layer=norm_layer, proj_drop=proj_drop, drop_path=drop_path[block_idx], layer_scale_init_value=layer_scale_init_value, )) if num_vit and num_vit == remain_idx: blocks.append(Flat()) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EfficientFormer(nn.Module): def __init__( self, depths, embed_dims=None, in_chans=3, num_classes=1000, global_pool='avg', downsamples=None, num_vit=0, mlp_ratios=4, pool_size=3, layer_scale_init_value=1e-5, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, norm_layer_cl=nn.LayerNorm, drop_rate=0., proj_drop_rate=0., drop_path_rate=0., **kwargs ): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.stem = Stem4(in_chans, embed_dims[0], norm_layer=norm_layer) prev_dim = embed_dims[0] # stochastic depth decay rule dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] downsamples = downsamples or (False,) + (True,) * (len(depths) - 1) stages = [] for i in range(len(depths)): stage = EfficientFormerStage( prev_dim, embed_dims[i], depths[i], downsample=downsamples[i], num_vit=num_vit if i == 3 else 0, pool_size=pool_size, mlp_ratio=mlp_ratios, act_layer=act_layer, norm_layer_cl=norm_layer_cl, norm_layer=norm_layer, proj_drop=proj_drop_rate, drop_path=dpr[i], layer_scale_init_value=layer_scale_init_value, ) prev_dim = embed_dims[i] stages.append(stage) self.stages = nn.Sequential(*stages) # Classifier head self.num_features = embed_dims[-1] self.norm = norm_layer_cl(self.num_features) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() # assuming model is always distilled (valid for current checkpoints, will split def if that changes) self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() self.distilled_training = False # must set this True to train w/ distillation token self.apply(self._init_weights) # init for classification def _init_weights(self, m): if 
isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {k for k, _ in self.named_parameters() if 'attention_biases' in k} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', # stem and embed blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head, self.head_dist def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=1) x = self.head_drop(x) if pre_logits: return x x, x_dist = self.head(x), self.head_dist(x) if self.distilled_training and self.training and not torch.jit.is_scripting(): # only return separate classification predictions when training in distilled mode return x, x_dist else: # during standard train/finetune, inference average the classifier predictions return (x + x_dist) / 2 def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _checkpoint_filter_fn(state_dict, model): """ Remap original checkpoints -> timm """ if 'stem.0.weight' in state_dict: return state_dict # non-original checkpoint, no remapping needed out_dict = {} import re stage_idx = 0 for k, v in state_dict.items(): if k.startswith('patch_embed'): k = k.replace('patch_embed.0', 'stem.conv1') k = k.replace('patch_embed.1', 'stem.norm1') k = k.replace('patch_embed.3', 'stem.conv2') k = k.replace('patch_embed.4', 'stem.norm2') if re.match(r'network\.(\d+)\.proj\.weight', k): stage_idx += 1 k = re.sub(r'network.(\d+).(\d+)', f'stages.{stage_idx}.blocks.\\2', k) k = re.sub(r'network.(\d+).proj', f'stages.{stage_idx}.downsample.conv', k) k = re.sub(r'network.(\d+).norm', f'stages.{stage_idx}.downsample.norm', k) k = re.sub(r'layer_scale_([0-9])', r'ls\1.gamma', k) k = k.replace('dist_head', 'head_dist') out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, 'crop_pct': .95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': ('head', 'head_dist'), **kwargs } default_cfgs = generate_default_cfgs({ 'efficientformer_l1.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformer_l3.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformer_l7.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), }) def _create_efficientformer(variant, pretrained=False, **kwargs): model = build_model_with_cfg( EfficientFormer, variant, pretrained, pretrained_filter_fn=_checkpoint_filter_fn, **kwargs) return model @register_model def efficientformer_l1(pretrained=False, **kwargs) -> EfficientFormer: model_args = dict( depths=EfficientFormer_depth['l1'], embed_dims=EfficientFormer_width['l1'], 
num_vit=1, ) return _create_efficientformer('efficientformer_l1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformer_l3(pretrained=False, **kwargs) -> EfficientFormer: model_args = dict( depths=EfficientFormer_depth['l3'], embed_dims=EfficientFormer_width['l3'], num_vit=4, ) return _create_efficientformer('efficientformer_l3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformer_l7(pretrained=False, **kwargs) -> EfficientFormer: model_args = dict( depths=EfficientFormer_depth['l7'], embed_dims=EfficientFormer_width['l7'], num_vit=8, ) return _create_efficientformer('efficientformer_l7', pretrained=pretrained, **dict(model_args, **kwargs))
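# Example usage (a minimal sketch, assuming the `timm` entrypoints above are
# registered; per forward_head above, the head and head_dist logits are averaged
# at inference, while set_distilled_training(True) + train() returns them
# separately for a distillation loss):
#
#   import torch, timm
#   model = timm.create_model('efficientformer_l1', pretrained=False).eval()
#   logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000), heads averaged
#   model.set_distilled_training(True)
#   model.train()
#   cls_out, dist_out = model(torch.randn(2, 3, 224, 224))  # separate head outputs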
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/efficientformer_v2.py
""" EfficientFormer-V2 @article{ li2022rethinking, title={Rethinking Vision Transformers for MobileNet Size and Speed}, author={Li, Yanyu and Hu, Ju and Wen, Yang and Evangelidis, Georgios and Salahi, Kamyar and Wang, Yanzhi and Tulyakov, Sergey and Ren, Jian}, journal={arXiv preprint arXiv:2212.08059}, year={2022} } Significantly refactored and cleaned up for timm from original at: https://github.com/snap-research/EfficientFormer Original code licensed Apache 2.0, Copyright (c) 2022 Snap Inc. Modifications and timm support by / Copyright 2023, Ross Wightman """ import math from functools import partial from typing import Dict import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_conv2d, create_norm_layer, get_act_layer, get_norm_layer, ConvNormAct from timm.layers import DropPath, trunc_normal_, to_2tuple, to_ntuple from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model EfficientFormer_width = { 'L': (40, 80, 192, 384), # 26m 83.3% 6attn 'S2': (32, 64, 144, 288), # 12m 81.6% 4attn dp0.02 'S1': (32, 48, 120, 224), # 6.1m 79.0 'S0': (32, 48, 96, 176), # 75.0 75.7 } EfficientFormer_depth = { 'L': (5, 5, 15, 10), # 26m 83.3% 'S2': (4, 4, 12, 8), # 12m 'S1': (3, 3, 9, 6), # 79.0 'S0': (2, 2, 6, 4), # 75.7 } EfficientFormer_expansion_ratios = { 'L': (4, 4, (4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 4, 3, 3, 3, 3, 4, 4, 4)), 'S2': (4, 4, (4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 3, 3, 3, 3, 4, 4)), 'S1': (4, 4, (4, 4, 3, 3, 3, 3, 4, 4, 4), (4, 4, 3, 3, 4, 4)), 'S0': (4, 4, (4, 3, 3, 3, 4, 4), (4, 3, 3, 4)), } class ConvNorm(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, bias=True, norm_layer='batchnorm2d', norm_kwargs=None, ): norm_kwargs = norm_kwargs or {} super(ConvNorm, self).__init__() self.conv = create_conv2d( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, ) self.bn = create_norm_layer(norm_layer, out_channels, **norm_kwargs) def forward(self, x): x = self.conv(x) x = self.bn(x) return x class Attention2d(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, dim=384, key_dim=32, num_heads=8, attn_ratio=4, resolution=7, act_layer=nn.GELU, stride=None, ): super().__init__() self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim resolution = to_2tuple(resolution) if stride is not None: resolution = tuple([math.ceil(r / stride) for r in resolution]) self.stride_conv = ConvNorm(dim, dim, kernel_size=3, stride=stride, groups=dim) self.upsample = nn.Upsample(scale_factor=stride, mode='bilinear') else: self.stride_conv = None self.upsample = None self.resolution = resolution self.N = self.resolution[0] * self.resolution[1] self.d = int(attn_ratio * key_dim) self.dh = int(attn_ratio * key_dim) * num_heads self.attn_ratio = attn_ratio kh = self.key_dim * self.num_heads self.q = ConvNorm(dim, kh) self.k = ConvNorm(dim, kh) self.v = ConvNorm(dim, self.dh) self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, groups=self.dh) self.talking_head1 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) self.talking_head2 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) self.act = act_layer() self.proj = ConvNorm(self.dh, dim, 1) pos = torch.stack(torch.meshgrid(torch.arange(self.resolution[0]), 
torch.arange(self.resolution[1]))).flatten(1) rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1] self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, self.N)) self.register_buffer('attention_bias_idxs', torch.LongTensor(rel_pos), persistent=False) self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): B, C, H, W = x.shape if self.stride_conv is not None: x = self.stride_conv(x) q = self.q(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) v = self.v(x) v_local = self.v_local(v) v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) attn = (q @ k) * self.scale attn = attn + self.get_attention_biases(x.device) attn = self.talking_head1(attn) attn = attn.softmax(dim=-1) attn = self.talking_head2(attn) x = (attn @ v).transpose(2, 3) x = x.reshape(B, self.dh, self.resolution[0], self.resolution[1]) + v_local if self.upsample is not None: x = self.upsample(x) x = self.act(x) x = self.proj(x) return x class LocalGlobalQuery(torch.nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.pool = nn.AvgPool2d(1, 2, 0) self.local = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=2, padding=1, groups=in_dim) self.proj = ConvNorm(in_dim, out_dim, 1) def forward(self, x): local_q = self.local(x) pool_q = self.pool(x) q = local_q + pool_q q = self.proj(q) return q class Attention2dDownsample(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, dim=384, key_dim=16, num_heads=8, attn_ratio=4, resolution=7, out_dim=None, act_layer=nn.GELU, ): super().__init__() self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim self.resolution = to_2tuple(resolution) self.resolution2 = tuple([math.ceil(r / 2) for r in self.resolution]) self.N = self.resolution[0] * self.resolution[1] self.N2 = self.resolution2[0] * self.resolution2[1] self.d = int(attn_ratio * key_dim) self.dh = int(attn_ratio * key_dim) * num_heads self.attn_ratio = attn_ratio self.out_dim = out_dim or dim kh = self.key_dim * self.num_heads self.q = LocalGlobalQuery(dim, kh) self.k = ConvNorm(dim, kh, 1) self.v = ConvNorm(dim, self.dh, 1) self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, stride=2, groups=self.dh) self.act = act_layer() self.proj = ConvNorm(self.dh, self.out_dim, 1) self.attention_biases = nn.Parameter(torch.zeros(num_heads, self.N)) k_pos = torch.stack(torch.meshgrid(torch.arange( self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1) q_pos = torch.stack(torch.meshgrid( torch.arange(0, self.resolution[0], step=2), torch.arange(0, self.resolution[1], step=2))).flatten(1) rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs() rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1] self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) self.attention_bias_cache = {} # per-device
attention_biases cache (data-parallel compat) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): B, C, H, W = x.shape q = self.q(x).reshape(B, self.num_heads, -1, self.N2).permute(0, 1, 3, 2) k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) v = self.v(x) v_local = self.v_local(v) v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) attn = (q @ k) * self.scale attn = attn + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(2, 3) x = x.reshape(B, self.dh, self.resolution2[0], self.resolution2[1]) + v_local x = self.act(x) x = self.proj(x) return x class Downsample(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=3, stride=2, padding=1, resolution=7, use_attn=False, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, ): super().__init__() kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) padding = to_2tuple(padding) norm_layer = norm_layer or nn.Identity() self.conv = ConvNorm( in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding, norm_layer=norm_layer, ) if use_attn: self.attn = Attention2dDownsample( dim=in_chs, out_dim=out_chs, resolution=resolution, act_layer=act_layer, ) else: self.attn = None def forward(self, x): out = self.conv(x) if self.attn is not None: return self.attn(x) + out return out class ConvMlpWithNorm(nn.Module): """ Implementation of MLP with 1*1 convolutions. 
Input: tensor with shape [B, C, H, W] """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, drop=0., mid_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = ConvNormAct( in_features, hidden_features, 1, bias=True, norm_layer=norm_layer, act_layer=act_layer) if mid_conv: self.mid = ConvNormAct( hidden_features, hidden_features, 3, groups=hidden_features, bias=True, norm_layer=norm_layer, act_layer=act_layer) else: self.mid = nn.Identity() self.drop1 = nn.Dropout(drop) self.fc2 = ConvNorm(hidden_features, out_features, 1, norm_layer=norm_layer) self.drop2 = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.mid(x) x = self.drop1(x) x = self.fc2(x) x = self.drop2(x) return x class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class EfficientFormerV2Block(nn.Module): def __init__( self, dim, mlp_ratio=4., act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, proj_drop=0., drop_path=0., layer_scale_init_value=1e-5, resolution=7, stride=None, use_attn=True, ): super().__init__() if use_attn: self.token_mixer = Attention2d( dim, resolution=resolution, act_layer=act_layer, stride=stride, ) self.ls1 = LayerScale2d( dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() else: self.token_mixer = None self.ls1 = None self.drop_path1 = None self.mlp = ConvMlpWithNorm( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer, drop=proj_drop, mid_conv=True, ) self.ls2 = LayerScale2d( dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): if self.token_mixer is not None: x = x + self.drop_path1(self.ls1(self.token_mixer(x))) x = x + self.drop_path2(self.ls2(self.mlp(x))) return x class Stem4(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d): super().__init__() self.stride = 4 self.conv1 = ConvNormAct( in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1, bias=True, norm_layer=norm_layer, act_layer=act_layer ) self.conv2 = ConvNormAct( out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1, bias=True, norm_layer=norm_layer, act_layer=act_layer ) class EfficientFormerV2Stage(nn.Module): def __init__( self, dim, dim_out, depth, resolution=7, downsample=True, block_stride=None, downsample_use_attn=False, block_use_attn=False, num_vit=1, mlp_ratio=4., proj_drop=.0, drop_path=0., layer_scale_init_value=1e-5, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, ): super().__init__() self.grad_checkpointing = False mlp_ratio = to_ntuple(depth)(mlp_ratio) resolution = to_2tuple(resolution) if downsample: self.downsample = Downsample( dim, dim_out, use_attn=downsample_use_attn, resolution=resolution, norm_layer=norm_layer, act_layer=act_layer, ) dim = dim_out resolution = tuple([math.ceil(r / 2) for r in resolution]) else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] for block_idx in range(depth): remain_idx = depth - num_vit - 1 b = EfficientFormerV2Block( dim, resolution=resolution, stride=block_stride, mlp_ratio=mlp_ratio[block_idx], use_attn=block_use_attn and block_idx > remain_idx, proj_drop=proj_drop, drop_path=drop_path[block_idx], layer_scale_init_value=layer_scale_init_value, act_layer=act_layer, norm_layer=norm_layer, ) blocks += [b] self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EfficientFormerV2(nn.Module): def __init__( self, depths, in_chans=3, img_size=224, global_pool='avg', embed_dims=None, downsamples=None, mlp_ratios=4, norm_layer='batchnorm2d', norm_eps=1e-5, act_layer='gelu', num_classes=1000, drop_rate=0., proj_drop_rate=0., drop_path_rate=0., layer_scale_init_value=1e-5, num_vit=0, distillation=True, ): super().__init__() assert global_pool in ('avg', '') self.num_classes = num_classes self.global_pool = global_pool self.feature_info = [] img_size = to_2tuple(img_size) norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) act_layer = get_act_layer(act_layer) self.stem = Stem4(in_chans, embed_dims[0], act_layer=act_layer, norm_layer=norm_layer) prev_dim = embed_dims[0] stride = 4 num_stages = len(depths) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] downsamples = downsamples or (False,) + (True,) * (len(depths) - 1) mlp_ratios = to_ntuple(num_stages)(mlp_ratios) stages = [] for i in range(num_stages): curr_resolution = tuple([math.ceil(s / stride) for s in img_size]) stage = EfficientFormerV2Stage( prev_dim, embed_dims[i], depth=depths[i], resolution=curr_resolution, downsample=downsamples[i], block_stride=2 if i == 2 else None, downsample_use_attn=i >= 3, block_use_attn=i >= 2, num_vit=num_vit, mlp_ratio=mlp_ratios[i], proj_drop=proj_drop_rate, drop_path=dpr[i], layer_scale_init_value=layer_scale_init_value, act_layer=act_layer, norm_layer=norm_layer, ) if downsamples[i]: stride *= 2 prev_dim = embed_dims[i] self.feature_info += [dict(num_chs=prev_dim, reduction=stride, 
module=f'stages.{i}')] stages.append(stage) self.stages = nn.Sequential(*stages) # Classifier head self.num_features = embed_dims[-1] self.norm = norm_layer(embed_dims[-1]) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() self.dist = distillation if self.dist: self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() else: self.head_dist = None self.apply(self.init_weights) self.distilled_training = False # init for classification def init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {k for k, _ in self.named_parameters() if 'attention_biases' in k} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', # stem and embed blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head, self.head_dist def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=(2, 3)) x = self.head_drop(x) if pre_logits: return x x, x_dist = self.head(x), self.head_dist(x) if self.distilled_training and self.training and not torch.jit.is_scripting(): # only return separate classification predictions when training in distilled mode return x, x_dist else: # during standard train/finetune, inference average the classifier predictions return (x + x_dist) / 2 def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, 'crop_pct': .95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': ('head', 'head_dist'), 'first_conv': 'stem.conv1.conv', **kwargs } default_cfgs = generate_default_cfgs({ 'efficientformerv2_s0.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformerv2_s1.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformerv2_s2.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformerv2_l.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), }) def _create_efficientformerv2(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( EfficientFormerV2, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def efficientformerv2_s0(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['S0'], embed_dims=EfficientFormer_width['S0'], num_vit=2, drop_path_rate=0.0, mlp_ratios=EfficientFormer_expansion_ratios['S0'], ) return _create_efficientformerv2('efficientformerv2_s0', pretrained=pretrained, 
**dict(model_args, **kwargs)) @register_model def efficientformerv2_s1(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['S1'], embed_dims=EfficientFormer_width['S1'], num_vit=2, drop_path_rate=0.0, mlp_ratios=EfficientFormer_expansion_ratios['S1'], ) return _create_efficientformerv2('efficientformerv2_s1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_s2(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['S2'], embed_dims=EfficientFormer_width['S2'], num_vit=4, drop_path_rate=0.02, mlp_ratios=EfficientFormer_expansion_ratios['S2'], ) return _create_efficientformerv2('efficientformerv2_s2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_l(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['L'], embed_dims=EfficientFormer_width['L'], num_vit=6, drop_path_rate=0.1, mlp_ratios=EfficientFormer_expansion_ratios['L'], ) return _create_efficientformerv2('efficientformerv2_l', pretrained=pretrained, **dict(model_args, **kwargs))
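# Example usage (a minimal sketch, assuming the `timm` entrypoints above are
# registered; note the default cfgs set fixed_input_size=True, since the relative
# position bias tables in Attention2d are built for the 224x224 train resolution):
#
#   import torch, timm
#   model = timm.create_model('efficientformerv2_s0', pretrained=False).eval()
#   logits = model(torch.randn(1, 3, 224, 224))  # -> (1, 1000)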
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/efficientnet.py
""" The EfficientNet Family in PyTorch An implementation of EfficientNet that covers a variety of related models with efficient architectures: * EfficientNet-V2 - `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 * EfficientNet (B0-B8, L2 + TensorFlow pretrained AutoAug/RandAug/AdvProp/NoisyStudent weight ports) - EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks - https://arxiv.org/abs/1905.11946 - CondConv: Conditionally Parameterized Convolutions for Efficient Inference - https://arxiv.org/abs/1904.04971 - Adversarial Examples Improve Image Recognition - https://arxiv.org/abs/1911.09665 - Self-training with Noisy Student improves ImageNet classification - https://arxiv.org/abs/1911.04252 * MixNet (Small, Medium, and Large) - MixConv: Mixed Depthwise Convolutional Kernels - https://arxiv.org/abs/1907.09595 * MNasNet B1, A1 (SE), Small - MnasNet: Platform-Aware Neural Architecture Search for Mobile - https://arxiv.org/abs/1807.11626 * FBNet-C - FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable NAS - https://arxiv.org/abs/1812.03443 * Single-Path NAS Pixel1 - Single-Path NAS: Designing Hardware-Efficient ConvNets - https://arxiv.org/abs/1904.02877 * TinyNet - Model Rubik's Cube: Twisting Resolution, Depth and Width for TinyNets - https://arxiv.org/abs/2010.14819 - Definitions & weights borrowed from https://github.com/huawei-noah/CV-Backbones/tree/master/tinynet_pytorch * And likely more... The majority of the above models (EfficientNet*, MixNet, MnasNet) and original weights were made available by Mingxing Tan, Quoc Le, and other members of their Google Brain team. Thanks for consistently releasing the models and weights open source! Hacked together by / Copyright 2019, Ross Wightman """ from functools import partial from typing import List import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_conv2d, create_classifier, get_norm_act_layer, GroupNormAct from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \ round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT from ._features import FeatureInfo, FeatureHooks from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['EfficientNet', 'EfficientNetFeatures'] class EfficientNet(nn.Module): """ EfficientNet A flexible and performant PyTorch implementation of efficient network architectures, including: * EfficientNet-V2 Small, Medium, Large, XL & B0-B3 * EfficientNet B0-B8, L2 * EfficientNet-EdgeTPU * EfficientNet-CondConv * MixNet S, M, L, XL * MnasNet A1, B1, and small * MobileNet-V2 * FBNet C * Single-Path NAS Pixel1 * TinyNet """ def __init__( self, block_args, num_classes=1000, num_features=1280, in_chans=3, stem_size=32, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0., global_pool='avg' ): super(EfficientNet, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.num_classes = num_classes
self.num_features = num_features self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = builder.features head_chs = builder.in_chs # Head + Pooling self.conv_head = create_conv2d(head_chs, self.num_features, 1, padding=pad_type) self.bn2 = norm_act_layer(self.num_features, inplace=True) self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) efficientnet_init_weights(self) def as_sequential(self): layers = [self.conv_stem, self.bn1] layers.extend(self.blocks) layers.extend([self.conv_head, self.bn2, self.global_pool]) layers.extend([nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^conv_stem|bn1', blocks=[ (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), (r'conv_head|bn2', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) x = self.conv_head(x) x = self.bn2(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.classifier(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x class EfficientNetFeatures(nn.Module): """ EfficientNet Feature Extractor A work-in-progress feature extraction module for EfficientNet, to use as a backbone for segmentation and object detection models. """ def __init__( self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, stem_size=32, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0. 
): super(EfficientNetFeatures, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, feature_location=feature_location, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = FeatureInfo(builder.features, out_indices) self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()} efficientnet_init_weights(self) # Register feature extraction hooks with FeatureHooks helper self.feature_hooks = None if feature_location != 'bottleneck': hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) self.feature_hooks = FeatureHooks(hooks, self.named_modules()) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x) -> List[torch.Tensor]: x = self.conv_stem(x) x = self.bn1(x) if self.feature_hooks is None: features = [] if 0 in self._stage_out_idx: features.append(x) # add stem out for i, b in enumerate(self.blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(b, x) else: x = b(x) if i + 1 in self._stage_out_idx: features.append(x) return features else: self.blocks(x) out = self.feature_hooks.get_output(x.device) return list(out.values()) def _create_effnet(variant, pretrained=False, **kwargs): features_mode = '' model_cls = EfficientNet kwargs_filter = None if kwargs.pop('features_only', False): if 'feature_cfg' in kwargs: features_mode = 'cfg' else: kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'global_pool') model_cls = EfficientNetFeatures features_mode = 'cls' model = build_model_with_cfg( model_cls, variant, pretrained, features_only=features_mode == 'cfg', pretrained_strict=features_mode != 'cls', kwargs_filter=kwargs_filter, **kwargs, ) if features_mode == 'cls': model.pretrained_cfg = model.default_cfg = pretrained_cfg_for_features(model.pretrained_cfg) return model def _gen_mnasnet_a1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-a1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer.
""" arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16_noskip'], # stage 1, 112x112 in ['ir_r2_k3_s2_e6_c24'], # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40_se0.25'], # stage 3, 28x28 in ['ir_r4_k3_s2_e6_c80'], # stage 4, 14x14 in ['ir_r2_k3_s1_e6_c112_se0.25'], # stage 5, 14x14 in ['ir_r3_k5_s2_e6_c160_se0.25'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-b1 model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer. """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_c16_noskip'], # stage 1, 112x112 in ['ir_r3_k3_s2_e3_c24'], # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40'], # stage 3, 28x28 in ['ir_r3_k5_s2_e6_c80'], # stage 4, 14x14 in ['ir_r2_k3_s1_e6_c96'], # stage 5, 14x14 in ['ir_r4_k5_s2_e6_c192'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320_noskip'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a mnasnet-small model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet Paper: https://arxiv.org/pdf/1807.11626.pdf. Args: channel_multiplier: multiplier to number of channels per layer.
""" arch_def = [ ['ds_r1_k3_s1_c8'], ['ir_r1_k3_s2_e3_c16'], ['ir_r2_k3_s2_e6_c16'], ['ir_r4_k5_s2_e6_c32_se0.25'], ['ir_r3_k3_s1_e6_c32_se0.25'], ['ir_r3_k5_s2_e6_c88_se0.25'], ['ir_r1_k3_s1_e6_c144'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=8, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v2( variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs): """ Generate MobileNet-V2 network Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py Paper: https://arxiv.org/abs/1801.04381 """ arch_def = [ ['ds_r1_k3_s1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r3_k3_s2_e6_c32'], ['ir_r4_k3_s2_e6_c64'], ['ir_r3_k3_s1_e6_c96'], ['ir_r3_k3_s2_e6_c160'], ['ir_r1_k3_s1_e6_c320'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head), num_features=1280 if fix_stem_head else max(1280, round_chs_fn(1280)), stem_size=32, fix_stem=fix_stem_head, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu6'), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_fbnetc(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """ FBNet-C Paper: https://arxiv.org/abs/1812.03443 Ref Impl: https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_modeldef.py NOTE: the impl above does not relate to the 'C' variant here, that was derived from paper, it was used to confirm some building block details """ arch_def = [ ['ir_r1_k3_s1_e1_c16'], ['ir_r1_k3_s2_e6_c24', 'ir_r2_k3_s1_e1_c24'], ['ir_r1_k5_s2_e6_c32', 'ir_r1_k5_s1_e3_c32', 'ir_r1_k5_s1_e6_c32', 'ir_r1_k3_s1_e6_c32'], ['ir_r1_k5_s2_e6_c64', 'ir_r1_k5_s1_e3_c64', 'ir_r2_k5_s1_e6_c64'], ['ir_r3_k5_s1_e6_c112', 'ir_r1_k5_s1_e3_c112'], ['ir_r4_k5_s2_e6_c184'], ['ir_r1_k3_s1_e6_c352'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=16, num_features=1984, # paper suggests this, but is not 100% clear round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_spnasnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates the Single-Path NAS model from search targeted for Pixel1 phone. Paper: https://arxiv.org/abs/1904.02877 Args: channel_multiplier: multiplier to number of channels per layer. 
""" arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_c16_noskip'], # stage 1, 112x112 in ['ir_r3_k3_s2_e3_c24'], # stage 2, 56x56 in ['ir_r1_k5_s2_e6_c40', 'ir_r3_k3_s1_e3_c40'], # stage 3, 28x28 in ['ir_r1_k5_s2_e6_c80', 'ir_r3_k3_s1_e3_c80'], # stage 4, 14x14in ['ir_r1_k5_s1_e6_c96', 'ir_r3_k5_s1_e3_c96'], # stage 5, 14x14in ['ir_r4_k5_s2_e6_c192'], # stage 6, 7x7 in ['ir_r1_k3_s1_e6_c320_noskip'] ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet( variant, channel_multiplier=1.0, depth_multiplier=1.0, channel_divisor=8, group_size=None, pretrained=False, **kwargs): """Creates an EfficientNet model. Ref impl: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py Paper: https://arxiv.org/abs/1905.11946 EfficientNet params name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) 'efficientnet-b0': (1.0, 1.0, 224, 0.2), 'efficientnet-b1': (1.0, 1.1, 240, 0.2), 'efficientnet-b2': (1.1, 1.2, 260, 0.3), 'efficientnet-b3': (1.2, 1.4, 300, 0.3), 'efficientnet-b4': (1.4, 1.8, 380, 0.4), 'efficientnet-b5': (1.6, 2.2, 456, 0.4), 'efficientnet-b6': (1.8, 2.6, 528, 0.5), 'efficientnet-b7': (2.0, 3.1, 600, 0.5), 'efficientnet-b8': (2.2, 3.6, 672, 0.5), 'efficientnet-l2': (4.3, 5.3, 800, 0.5), Args: channel_multiplier: multiplier to number of channels per layer depth_multiplier: multiplier to number of repeats per stage """ arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, divisor=channel_divisor) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, act_layer=resolve_act_layer(kwargs, 'swish'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_edge( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, pretrained=False, **kwargs): """ Creates an EfficientNet-EdgeTPU model Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/edgetpu """ arch_def = [ # NOTE `fc` is present to override a mismatch between stem channels and in chs not # present in other models ['er_r1_k3_s1_e4_c24_fc24_noskip'], ['er_r2_k3_s2_e8_c32'], ['er_r4_k3_s2_e8_c48'], ['ir_r5_k5_s2_e8_c96'], ['ir_r4_k5_s1_e8_c144'], ['ir_r2_k5_s2_e8_c192'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'relu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_condconv( variant, channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=1, 
pretrained=False, **kwargs): """Creates an EfficientNet-CondConv model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/condconv """ arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25_cc4'], ['ir_r4_k5_s2_e6_c192_se0.25_cc4'], ['ir_r1_k3_s1_e6_c320_se0.25_cc4'], ] # NOTE unlike official impl, this one uses `cc<x>` option where x is the base number of experts for each stage and # the expert_multiplier increases that on a per-model basis as with depth/channel multipliers round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, experts_multiplier=experts_multiplier), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'swish'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnet_lite(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """Creates an EfficientNet-Lite model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite Paper: https://arxiv.org/abs/1905.11946 EfficientNet params name: (channel_multiplier, depth_multiplier, resolution, dropout_rate) 'efficientnet-lite0': (1.0, 1.0, 224, 0.2), 'efficientnet-lite1': (1.0, 1.1, 240, 0.2), 'efficientnet-lite2': (1.1, 1.2, 260, 0.3), 'efficientnet-lite3': (1.2, 1.4, 280, 0.3), 'efficientnet-lite4': (1.4, 1.8, 300, 0.3), Args: channel_multiplier: multiplier to number of channels per layer depth_multiplier: multiplier to number of repeats per stage """ arch_def = [ ['ds_r1_k3_s1_e1_c16'], ['ir_r2_k3_s2_e6_c24'], ['ir_r2_k5_s2_e6_c40'], ['ir_r3_k3_s2_e6_c80'], ['ir_r3_k5_s1_e6_c112'], ['ir_r4_k5_s2_e6_c192'], ['ir_r1_k3_s1_e6_c320'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, fix_first_last=True), num_features=1280, stem_size=32, fix_stem=True, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), act_layer=resolve_act_layer(kwargs, 'relu6'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_base( variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 base model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r1_k3_s1_e1_c16_skip'], ['er_r2_k3_s2_e4_c32'], ['er_r2_k3_s2_e4_c48'], ['ir_r3_k3_s2_e4_c96_se0.25'], ['ir_r5_k3_s1_e6_c112_se0.25'], ['ir_r8_k3_s2_e6_c192_se0.25'], ] round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.) 
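    # round_limit=0. above disables the usual 'no more than ~10% reduction' guard in
    # round_channels, so V2-base channel widths round strictly toward the divisor
    # (a reading of round_channels defaults, not an official statement).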
model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=round_chs_fn(1280), stem_size=32, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_s( variant, channel_multiplier=1.0, depth_multiplier=1.0, group_size=None, rw=False, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 Small model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 NOTE: `rw` flag sets up 'small' variant to behave like my initial v2 small model, before ref the impl was released. """ arch_def = [ ['cn_r2_k3_s1_e1_c24_skip'], ['er_r4_k3_s2_e4_c48'], ['er_r4_k3_s2_e4_c64'], ['ir_r6_k3_s2_e4_c128_se0.25'], ['ir_r9_k3_s1_e6_c160_se0.25'], ['ir_r15_k3_s2_e6_c256_se0.25'], ] num_features = 1280 if rw: # my original variant, based on paper figure differs from the official release arch_def[0] = ['er_r2_k3_s1_e1_c24'] arch_def[-1] = ['ir_r15_k3_s2_e6_c272_se0.25'] num_features = 1792 round_chs_fn = partial(round_channels, multiplier=channel_multiplier) model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, group_size=group_size), num_features=round_chs_fn(num_features), stem_size=24, round_chs_fn=round_chs_fn, norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 Medium model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r3_k3_s1_e1_c24_skip'], ['er_r5_k3_s2_e4_c48'], ['er_r5_k3_s2_e4_c80'], ['ir_r7_k3_s2_e4_c160_se0.25'], ['ir_r14_k3_s1_e6_c176_se0.25'], ['ir_r18_k3_s2_e6_c304_se0.25'], ['ir_r5_k3_s1_e6_c512_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=1280, stem_size=24, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_l(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 Large model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r4_k3_s1_e1_c32_skip'], ['er_r7_k3_s2_e4_c64'], ['er_r7_k3_s2_e4_c96'], ['ir_r10_k3_s2_e4_c192_se0.25'], ['ir_r19_k3_s1_e6_c224_se0.25'], ['ir_r25_k3_s2_e6_c384_se0.25'], ['ir_r7_k3_s1_e6_c640_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=1280, stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) 
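    # num_features above stays fixed at 1280 for the V2 M/L variants rather than being
    # scaled through round_chs_fn as in the V1 and V2-base generators.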
model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_efficientnetv2_xl(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """ Creates an EfficientNet-V2 Xtra-Large model Ref impl: https://github.com/google/automl/tree/master/efficientnetv2 Paper: `EfficientNetV2: Smaller Models and Faster Training` - https://arxiv.org/abs/2104.00298 """ arch_def = [ ['cn_r4_k3_s1_e1_c32_skip'], ['er_r8_k3_s2_e4_c64'], ['er_r8_k3_s2_e4_c96'], ['ir_r16_k3_s2_e4_c192_se0.25'], ['ir_r24_k3_s1_e6_c256_se0.25'], ['ir_r32_k3_s2_e6_c512_se0.25'], ['ir_r8_k3_s1_e6_c640_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier), num_features=1280, stem_size=32, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'silu'), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a MixNet Small model. Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet Paper: https://arxiv.org/abs/1907.09595 """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16'], # relu # stage 1, 112x112 in ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], # relu # stage 2, 56x56 in ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish # stage 3, 28x28 in ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], # swish # stage 4, 14x14in ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish # stage 5, 14x14in ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish # 7x7 ] model_kwargs = dict( block_args=decode_arch_def(arch_def), num_features=1536, stem_size=16, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_mixnet_m(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs): """Creates a MixNet Medium-Large model. 
Ref impl: https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet/mixnet Paper: https://arxiv.org/abs/1907.09595 """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c24'], # relu # stage 1, 112x112 in ['ir_r1_k3.5.7_a1.1_p1.1_s2_e6_c32', 'ir_r1_k3_a1.1_p1.1_s1_e3_c32'], # relu # stage 2, 56x56 in ['ir_r1_k3.5.7.9_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], # swish # stage 3, 28x28 in ['ir_r1_k3.5.7_s2_e6_c80_se0.25_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e6_c80_se0.25_nsw'], # swish # stage 4, 14x14in ['ir_r1_k3_s1_e6_c120_se0.5_nsw', 'ir_r3_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], # swish # stage 5, 14x14in ['ir_r1_k3.5.7.9_s2_e6_c200_se0.5_nsw', 'ir_r3_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw'], # swish # 7x7 ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=1536, stem_size=24, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _gen_tinynet( variant, model_width=1.0, depth_multiplier=1.0, pretrained=False, **kwargs ): """Creates a TinyNet model. """ arch_def = [ ['ds_r1_k3_s1_e1_c16_se0.25'], ['ir_r2_k3_s2_e6_c24_se0.25'], ['ir_r2_k5_s2_e6_c40_se0.25'], ['ir_r3_k3_s2_e6_c80_se0.25'], ['ir_r3_k5_s1_e6_c112_se0.25'], ['ir_r4_k5_s2_e6_c192_se0.25'], ['ir_r1_k3_s1_e6_c320_se0.25'], ] model_kwargs = dict( block_args=decode_arch_def(arch_def, depth_multiplier, depth_trunc='round'), num_features=max(1280, round_channels(1280, model_width, 8, None)), stem_size=32, fix_stem=True, round_chs_fn=partial(round_channels, multiplier=model_width), act_layer=resolve_act_layer(kwargs, 'swish'), norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), **kwargs, ) model = _create_effnet(variant, pretrained, **model_kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'mnasnet_050.untrained': _cfg(), 'mnasnet_075.untrained': _cfg(), 'mnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth', hf_hub_id='timm/'), 'mnasnet_140.untrained': _cfg(), 'semnasnet_050.untrained': _cfg(), 'semnasnet_075.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/semnasnet_075-18710866.pth', hf_hub_id='timm/'), 'semnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth', hf_hub_id='timm/'), 'semnasnet_140.untrained': _cfg(), 'mnasnet_small.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth', hf_hub_id='timm/'), 'mobilenetv2_035.untrained': _cfg(), 'mobilenetv2_050.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth', hf_hub_id='timm/', interpolation='bicubic', ), 'mobilenetv2_075.untrained': _cfg(), 'mobilenetv2_100.ra_in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_100_ra-b33bc2c4.pth', hf_hub_id='timm/'), 'mobilenetv2_110d.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_110d_ra-77090ade.pth', hf_hub_id='timm/'), 'mobilenetv2_120d.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_120d_ra-5987e2ed.pth', hf_hub_id='timm/'), 'mobilenetv2_140.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_140_ra-21a4e913.pth', hf_hub_id='timm/'), 'fbnetc_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth', hf_hub_id='timm/', interpolation='bilinear'), 'spnasnet_100.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/spnasnet_100-048bc3f4.pth', hf_hub_id='timm/', interpolation='bilinear'), # NOTE experimenting with alternate attention 'efficientnet_b0.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b0_ra-3dd342df.pth', hf_hub_id='timm/'), 'efficientnet_b1.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b1-533bc792.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), crop_pct=1.0), 'efficientnet_b2.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b2_ra-bcdf34b7.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), test_input_size=(3, 288, 288), crop_pct=1.0), 'efficientnet_b3.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b3_ra2-cf984f9c.pth', hf_hub_id='timm/', input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_b4.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_b4_ra2_320-7eb33cd5.pth', hf_hub_id='timm/', input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), crop_pct=1.0), 'efficientnet_b5.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, crop_mode='squash'), 'efficientnet_b5.sw_in12k': _cfg( hf_hub_id='timm/', input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.95, num_classes=11821), 'efficientnet_b6.untrained': _cfg( url='', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'efficientnet_b7.untrained': _cfg( url='', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'efficientnet_b8.untrained': _cfg( url='', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'efficientnet_l2.untrained': _cfg( url='', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.961), # FIXME experimental 'efficientnet_b0_gn.untrained': _cfg(), 'efficientnet_b0_g8_gn.untrained': _cfg(), 'efficientnet_b0_g16_evos.untrained': _cfg(), 'efficientnet_b3_gn.untrained': _cfg( input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_b3_g8_gn.untrained': _cfg( input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), crop_pct=1.0), 'efficientnet_es.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_ra-f111e99c.pth', hf_hub_id='timm/'), 
'efficientnet_em.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_em_ra2-66250f76.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_el.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el-3b455510.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_es_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_es_pruned75-1b7248cf.pth', hf_hub_id='timm/'), 'efficientnet_el_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_el_pruned70-ef2a2ccf.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_cc_b0_4e.untrained': _cfg(), 'efficientnet_cc_b0_8e.untrained': _cfg(), 'efficientnet_cc_b1_8e.untrained': _cfg(input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_lite0.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_lite0_ra-37913777.pth', hf_hub_id='timm/'), 'efficientnet_lite1.untrained': _cfg( input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'efficientnet_lite2.untrained': _cfg( input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'efficientnet_lite3.untrained': _cfg( input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'efficientnet_lite4.untrained': _cfg( input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'efficientnet_b1_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb1_pruned-bea43a3a.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnet_b2_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb2_pruned-08c1b27c.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnet_b3_pruned.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/effnetb3_pruned-59ecf72d.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'efficientnetv2_rw_t.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_t_agc-3620981a.pth', hf_hub_id='timm/', input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), 'gc_efficientnetv2_rw_t.agc_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gc_efficientnetv2_rw_t_agc-927a0bde.pth', hf_hub_id='timm/', input_size=(3, 224, 224), test_input_size=(3, 288, 288), pool_size=(7, 7), crop_pct=1.0), 'efficientnetv2_rw_s.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnet_v2s_ra2_288-a6477665.pth', hf_hub_id='timm/', input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), 'efficientnetv2_rw_m.agc_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/efficientnetv2_rw_m_agc-3d90cb1e.pth', hf_hub_id='timm/', 
input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), 'efficientnetv2_s.untrained': _cfg( input_size=(3, 288, 288), test_input_size=(3, 384, 384), pool_size=(9, 9), crop_pct=1.0), 'efficientnetv2_m.untrained': _cfg( input_size=(3, 320, 320), test_input_size=(3, 416, 416), pool_size=(10, 10), crop_pct=1.0), 'efficientnetv2_l.untrained': _cfg( input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0), 'efficientnetv2_xl.untrained': _cfg( input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0), 'tf_efficientnet_b0.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ns-c0e6a31c.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ns-99dd0c41.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ns-00306e48.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ns-9d44bf68.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ns-d6313a46.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ns-6f26d0cf.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ns-51548356.pth', hf_hub_id='timm/', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ns-1dbc32de.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_l2.ns_jft_in1k_475': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns_475-bebbd00a.pth', hf_hub_id='timm/', input_size=(3, 475, 475), pool_size=(15, 15), crop_pct=0.936), 'tf_efficientnet_l2.ns_jft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_l2_ns-df73bb44.pth', hf_hub_id='timm/', input_size=(3, 800, 800), pool_size=(25, 25), crop_pct=0.96), 'tf_efficientnet_b0.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 224, 224)), 'tf_efficientnet_b1.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), 
pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b8.ap_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'tf_efficientnet_b5.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ra-9a3e5369.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b7.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ra-6c08e654.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b8.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ra-572d5dd9.pth', hf_hub_id='timm/', input_size=(3, 672, 672), pool_size=(21, 21), crop_pct=0.954), 'tf_efficientnet_b0.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_aa-827b6e33.pth', hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_aa-ea7a6ee0.pth', hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_aa-60c94f97.pth', hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 
'tf_efficientnet_b3.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_aa-84b4657e.pth', hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_aa-818f208c.pth', hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.aa_in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_aa-99018a74.pth', hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_b6.aa_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_aa-80ba17e4.pth', hf_hub_id='timm/', input_size=(3, 528, 528), pool_size=(17, 17), crop_pct=0.942), 'tf_efficientnet_b7.aa_in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_aa-076e3472.pth', hf_hub_id='timm/', input_size=(3, 600, 600), pool_size=(19, 19), crop_pct=0.949), 'tf_efficientnet_b0.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0-0af12548.pth', #hf_hub_id='timm/', input_size=(3, 224, 224)), 'tf_efficientnet_b1.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1-5c1377c4.pth', #hf_hub_id='timm/', input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_b2.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2-e393ef04.pth', #hf_hub_id='timm/', input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890), 'tf_efficientnet_b3.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3-e3bd6955.pth', #hf_hub_id='timm/', input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_b4.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4-74ee3bed.pth', #hf_hub_id='timm/', input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.922), 'tf_efficientnet_b5.in1k': _cfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5-c6949ce9.pth', #hf_hub_id='timm/', input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934), 'tf_efficientnet_es.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 224, 224), ), 'tf_efficientnet_em.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_em-e78cfe58.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_el.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_el-5143854e.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904), 'tf_efficientnet_cc_b0_4e.in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_4e-4362b6b2.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_efficientnet_cc_b0_8e.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b0_8e-66184a25.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_efficientnet_cc_b1_8e.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_cc_b1_8e-f7c79ae1.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882), 'tf_efficientnet_lite0.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite0-0aa007d2.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res ), 'tf_efficientnet_lite1.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite1-bde8b488.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 240, 240), pool_size=(8, 8), crop_pct=0.882, interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res ), 'tf_efficientnet_lite2.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite2-dcccb7df.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 260, 260), pool_size=(9, 9), crop_pct=0.890, interpolation='bicubic', # should be bilinear but bicubic better match for TF bilinear at low res ), 'tf_efficientnet_lite3.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite3-b733e338.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), pool_size=(10, 10), crop_pct=0.904, interpolation='bilinear'), 'tf_efficientnet_lite4.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_lite4-741542c3.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 380, 380), pool_size=(12, 12), crop_pct=0.920, interpolation='bilinear'), 'tf_efficientnetv2_s.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_xl.in21k_ft_in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_s.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_s.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 300, 300), test_input_size=(3, 384, 384), pool_size=(10, 10), crop_pct=1.0), 'tf_efficientnetv2_m.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_l.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 480, 480), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_xl.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), num_classes=21843, input_size=(3, 384, 384), test_input_size=(3, 512, 512), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'tf_efficientnetv2_b0.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth', hf_hub_id='timm/', input_size=(3, 192, 192), test_input_size=(3, 224, 224), pool_size=(6, 6)), 'tf_efficientnetv2_b1.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth', hf_hub_id='timm/', input_size=(3, 192, 192), test_input_size=(3, 240, 240), pool_size=(6, 6), crop_pct=0.882), 'tf_efficientnetv2_b2.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth', hf_hub_id='timm/', input_size=(3, 208, 208), test_input_size=(3, 260, 260), pool_size=(7, 7), crop_pct=0.890), 
'tf_efficientnetv2_b3.in21k_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.9, crop_mode='squash'), 'tf_efficientnetv2_b3.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth', hf_hub_id='timm/', input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), 'tf_efficientnetv2_b3.in21k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, num_classes=21843, input_size=(3, 240, 240), test_input_size=(3, 300, 300), pool_size=(8, 8), crop_pct=0.904), 'mixnet_s.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth', hf_hub_id='timm/'), 'mixnet_m.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth', hf_hub_id='timm/'), 'mixnet_l.ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth', hf_hub_id='timm/'), 'mixnet_xl.ra_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth', hf_hub_id='timm/'), 'mixnet_xxl.untrained': _cfg(), 'tf_mixnet_s.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_s-89d3354b.pth', hf_hub_id='timm/'), 'tf_mixnet_m.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_m-0f4d8805.pth', hf_hub_id='timm/'), 'tf_mixnet_l.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mixnet_l-6c92e0c8.pth', hf_hub_id='timm/'), "tinynet_a.in1k": _cfg( input_size=(3, 192, 192), pool_size=(6, 6), # int(224 * 0.86) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_a.pth', hf_hub_id='timm/'), "tinynet_b.in1k": _cfg( input_size=(3, 188, 188), pool_size=(6, 6), # int(224 * 0.84) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_b.pth', hf_hub_id='timm/'), "tinynet_c.in1k": _cfg( input_size=(3, 184, 184), pool_size=(6, 6), # int(224 * 0.825) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_c.pth', hf_hub_id='timm/'), "tinynet_d.in1k": _cfg( input_size=(3, 152, 152), pool_size=(5, 5), # int(224 * 0.68) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_d.pth', hf_hub_id='timm/'), "tinynet_e.in1k": _cfg( input_size=(3, 106, 106), pool_size=(4, 4), # int(224 * 0.475) url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth', hf_hub_id='timm/'), }) @register_model def mnasnet_050(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 0.5. """ model = _gen_mnasnet_b1('mnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_075(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 0.75. """ model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_100(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 1.0. 
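
    Example (illustrative sketch; `features_only` routing through EfficientNetFeatures
    is handled by the shared _create_effnet helper, and the call below is assumed
    `timm` usage rather than anything defined in this file):

        import timm
        backbone = timm.create_model('mnasnet_100', pretrained=False, features_only=True)
        # forward() then yields a list of per-stage feature maps instead of logits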
""" model = _gen_mnasnet_b1('mnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_b1(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 1.0. """ return mnasnet_100(pretrained, **kwargs) @register_model def mnasnet_140(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet B1, depth multiplier of 1.4 """ model = _gen_mnasnet_b1('mnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_050(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 0.5 """ model = _gen_mnasnet_a1('semnasnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_075(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 0.75. """ model = _gen_mnasnet_a1('semnasnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def semnasnet_100(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ model = _gen_mnasnet_a1('semnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_a1(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 1.0. """ return semnasnet_100(pretrained, **kwargs) @register_model def semnasnet_140(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet A1 (w/ SE), depth multiplier of 1.4. """ model = _gen_mnasnet_a1('semnasnet_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def mnasnet_small(pretrained=False, **kwargs) -> EfficientNet: """ MNASNet Small, depth multiplier of 1.0. """ model = _gen_mnasnet_small('mnasnet_small', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_035(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 0.35 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_035', 0.35, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_050(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 0.5 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_075(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 0.75 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_100(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.0 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_140(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.4 channel multiplier """ model = _gen_mobilenet_v2('mobilenetv2_140', 1.4, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_110d(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.1 channel, 1.2 depth multipliers""" model = _gen_mobilenet_v2( 'mobilenetv2_110d', 1.1, depth_multiplier=1.2, fix_stem_head=True, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv2_120d(pretrained=False, **kwargs) -> EfficientNet: """ MobileNet V2 w/ 1.2 channel, 1.4 depth multipliers """ model = _gen_mobilenet_v2( 'mobilenetv2_120d', 1.2, depth_multiplier=1.4, fix_stem_head=True, pretrained=pretrained, **kwargs) return model @register_model def fbnetc_100(pretrained=False, **kwargs) -> EfficientNet: """ FBNet-C """ if pretrained: # 
pretrained model trained with non-default BN epsilon kwargs['bn_eps'] = BN_EPS_TF_DEFAULT model = _gen_fbnetc('fbnetc_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def spnasnet_100(pretrained=False, **kwargs) -> EfficientNet: """ Single-Path NAS Pixel1""" model = _gen_spnasnet('spnasnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2a(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2 @ 288x288 w/ 1.0 test crop""" # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now return efficientnet_b2(pretrained=pretrained, **kwargs) @register_model def efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3a(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 @ 320x320 w/ 1.0 test crop-pct """ # WARN this model def is deprecated, different train/test res + test crop handled by default_cfg now return efficientnet_b3(pretrained=pretrained, **kwargs) @register_model def efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B6 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B7 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b8(pretrained=False, **kwargs) -> 
EfficientNet: """ EfficientNet-B8 """ # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-L2.""" # NOTE for train, drop_rate should be 0.5, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model # FIXME experimental group cong / GroupNorm / EvoNorm experiments @register_model def efficientnet_b0_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 + GroupNorm""" model = _gen_efficientnet( 'efficientnet_b0_gn', norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0_g8_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 w/ group conv + GroupNorm""" model = _gen_efficientnet( 'efficientnet_b0_g8_gn', group_size=8, norm_layer=partial(GroupNormAct, group_size=8), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b0_g16_evos(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0 w/ group 16 conv + EvoNorm""" model = _gen_efficientnet( 'efficientnet_b0_g16_evos', group_size=16, channel_divisor=16, pretrained=pretrained, **kwargs) #norm_layer=partial(EvoNorm2dS0, group_size=16), return model @register_model def efficientnet_b3_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 w/ GroupNorm """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3_gn', channel_multiplier=1.2, depth_multiplier=1.4, channel_divisor=16, norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_g8_gn(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 w/ grouped conv + BN""" # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet( 'efficientnet_b3_g8_gn', channel_multiplier=1.2, depth_multiplier=1.4, group_size=8, channel_divisor=16, norm_layer=partial(GroupNormAct, group_size=16), pretrained=pretrained, **kwargs) return model @register_model def efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small. """ model = _gen_efficientnet_edge( 'efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_es_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small Pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" model = _gen_efficientnet_edge( 'efficientnet_es_pruned', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Medium. """ model = _gen_efficientnet_edge( 'efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large. 
""" model = _gen_efficientnet_edge( 'efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_el_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large pruned. For more info: https://github.com/DeGirum/pruned-models/releases/tag/efficientnet_v1.0""" model = _gen_efficientnet_edge( 'efficientnet_el_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_condconv( 'efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_condconv( 'efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B1 w/ 8 Experts """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_condconv( 'efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 model = _gen_efficientnet_lite( 'efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b1_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1 Pruned. 
The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' variant = 'efficientnet_b1_pruned' model = _gen_efficientnet( variant, channel_multiplier=1.0, depth_multiplier=1.1, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b2_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'efficientnet_b2_pruned', channel_multiplier=1.1, depth_multiplier=1.2, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnet_b3_pruned(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3 Pruned. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pruned=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Tiny (Custom variant, tiny not in paper). """ model = _gen_efficientnetv2_s( 'efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, pretrained=pretrained, **kwargs) return model @register_model def gc_efficientnetv2_rw_t(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Tiny w/ Global Context Attn (Custom variant, tiny not in paper). """ model = _gen_efficientnetv2_s( 'gc_efficientnetv2_rw_t', channel_multiplier=0.8, depth_multiplier=0.9, rw=False, se_layer='gc', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small (RW variant). NOTE: This is my initial (pre official code release) w/ some differences. See efficientnetv2_s and tf_efficientnetv2_s for versions that match the official w/ PyTorch vs TF padding """ model = _gen_efficientnetv2_s('efficientnetv2_rw_s', rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_rw_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium (RW variant). """ model = _gen_efficientnetv2_s( 'efficientnetv2_rw_m', channel_multiplier=1.2, depth_multiplier=(1.2,) * 4 + (1.6,) * 2, rw=True, pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small. """ model = _gen_efficientnetv2_s('efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium. """ model = _gen_efficientnetv2_m('efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Large. """ model = _gen_efficientnetv2_l('efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Xtra-Large. """ model = _gen_efficientnetv2_xl('efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B0. 
Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_b0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B1. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B2. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B3. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B4. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_b4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b5(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B5. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_b5', channel_multiplier=1.6, depth_multiplier=2.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b6(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B6. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_b6', channel_multiplier=1.8, depth_multiplier=2.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b7(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B7. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_b7', channel_multiplier=2.0, depth_multiplier=3.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_b8(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-B8. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_b8', channel_multiplier=2.2, depth_multiplier=3.6, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_l2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-L2 NoisyStudent. 
Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.5 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet( 'tf_efficientnet_l2', channel_multiplier=4.3, depth_multiplier=5.3, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_es(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge Small. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_edge( 'tf_efficientnet_es', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_em(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Medium. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_edge( 'tf_efficientnet_em', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_el(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Edge-Large. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_edge( 'tf_efficientnet_el', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_4e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 4 Experts. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b0_4e', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b0_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B0 w/ 8 Experts. Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b0_8e', channel_multiplier=1.0, depth_multiplier=1.0, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_cc_b1_8e(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-CondConv-B1 w/ 8 Experts. 
Tensorflow compatible variant """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_condconv( 'tf_efficientnet_cc_b1_8e', channel_multiplier=1.0, depth_multiplier=1.1, experts_multiplier=2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite0 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite( 'tf_efficientnet_lite0', channel_multiplier=1.0, depth_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite1 """ # NOTE for train, drop_rate should be 0.2, drop_path_rate should be 0.2 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite( 'tf_efficientnet_lite1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite2 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite( 'tf_efficientnet_lite2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite3 """ # NOTE for train, drop_rate should be 0.3, drop_path_rate should be 0.2 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite( 'tf_efficientnet_lite3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnet_lite4(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-Lite4 """ # NOTE for train, drop_rate should be 0.4, drop_path_rate should be 0.2 kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet_lite( 'tf_efficientnet_lite4', channel_multiplier=1.4, depth_multiplier=1.8, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_s(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Small. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnetv2_s('tf_efficientnetv2_s', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_m(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Medium. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnetv2_m('tf_efficientnetv2_m', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_l(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Large. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnetv2_l('tf_efficientnetv2_l', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_xl(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2 Xtra-Large. 
Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnetv2_xl('tf_efficientnetv2_xl', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b0(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B0. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnetv2_base('tf_efficientnetv2_b0', pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b1(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B1. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b1', channel_multiplier=1.0, depth_multiplier=1.1, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b2(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B2. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b2', channel_multiplier=1.1, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def tf_efficientnetv2_b3(pretrained=False, **kwargs) -> EfficientNet: """ EfficientNet-V2-B3. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnetv2_base( 'tf_efficientnetv2_b3', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model @register_model def mixnet_s(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Small model. """ model = _gen_mixnet_s( 'mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_m(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Medium model. """ model = _gen_mixnet_m( 'mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def mixnet_l(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Large model. """ model = _gen_mixnet_m( 'mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xl(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Extra-Large model. Not a paper spec, experimental def by RW w/ depth scaling. """ model = _gen_mixnet_m( 'mixnet_xl', channel_multiplier=1.6, depth_multiplier=1.2, pretrained=pretrained, **kwargs) return model @register_model def mixnet_xxl(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Double Extra Large model. Not a paper spec, experimental def by RW w/ depth scaling. """ model = _gen_mixnet_m( 'mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_s(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Small model. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mixnet_s( 'tf_mixnet_s', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_m(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Medium model. 
Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mixnet_m( 'tf_mixnet_m', channel_multiplier=1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mixnet_l(pretrained=False, **kwargs) -> EfficientNet: """Creates a MixNet Large model. Tensorflow compatible variant """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mixnet_m( 'tf_mixnet_l', channel_multiplier=1.3, pretrained=pretrained, **kwargs) return model @register_model def tinynet_a(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_a', 1.0, 1.2, pretrained=pretrained, **kwargs) return model @register_model def tinynet_b(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_b', 0.75, 1.1, pretrained=pretrained, **kwargs) return model @register_model def tinynet_c(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_c', 0.54, 0.85, pretrained=pretrained, **kwargs) return model @register_model def tinynet_d(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_d', 0.54, 0.695, pretrained=pretrained, **kwargs) return model @register_model def tinynet_e(pretrained=False, **kwargs) -> EfficientNet: model = _gen_tinynet('tinynet_e', 0.51, 0.6, pretrained=pretrained, **kwargs) return model register_model_deprecations(__name__, { 'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k', 'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k', 'tf_efficientnet_b2_ap': 'tf_efficientnet_b2.ap_in1k', 'tf_efficientnet_b3_ap': 'tf_efficientnet_b3.ap_in1k', 'tf_efficientnet_b4_ap': 'tf_efficientnet_b4.ap_in1k', 'tf_efficientnet_b5_ap': 'tf_efficientnet_b5.ap_in1k', 'tf_efficientnet_b6_ap': 'tf_efficientnet_b6.ap_in1k', 'tf_efficientnet_b7_ap': 'tf_efficientnet_b7.ap_in1k', 'tf_efficientnet_b8_ap': 'tf_efficientnet_b8.ap_in1k', 'tf_efficientnet_b0_ns': 'tf_efficientnet_b0.ns_jft_in1k', 'tf_efficientnet_b1_ns': 'tf_efficientnet_b1.ns_jft_in1k', 'tf_efficientnet_b2_ns': 'tf_efficientnet_b2.ns_jft_in1k', 'tf_efficientnet_b3_ns': 'tf_efficientnet_b3.ns_jft_in1k', 'tf_efficientnet_b4_ns': 'tf_efficientnet_b4.ns_jft_in1k', 'tf_efficientnet_b5_ns': 'tf_efficientnet_b5.ns_jft_in1k', 'tf_efficientnet_b6_ns': 'tf_efficientnet_b6.ns_jft_in1k', 'tf_efficientnet_b7_ns': 'tf_efficientnet_b7.ns_jft_in1k', 'tf_efficientnet_l2_ns_475': 'tf_efficientnet_l2.ns_jft_in1k_475', 'tf_efficientnet_l2_ns': 'tf_efficientnet_l2.ns_jft_in1k', 'tf_efficientnetv2_s_in21ft1k': 'tf_efficientnetv2_s.in21k_ft_in1k', 'tf_efficientnetv2_m_in21ft1k': 'tf_efficientnetv2_m.in21k_ft_in1k', 'tf_efficientnetv2_l_in21ft1k': 'tf_efficientnetv2_l.in21k_ft_in1k', 'tf_efficientnetv2_xl_in21ft1k': 'tf_efficientnetv2_xl.in21k_ft_in1k', 'tf_efficientnetv2_s_in21k': 'tf_efficientnetv2_s.in21k', 'tf_efficientnetv2_m_in21k': 'tf_efficientnetv2_m.in21k', 'tf_efficientnetv2_l_in21k': 'tf_efficientnetv2_l.in21k', 'tf_efficientnetv2_xl_in21k': 'tf_efficientnetv2_xl.in21k', })
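# Usage sketch (illustrative addition, not part of the original timm source): any of the
# variants registered above can be built through the timm factory. 'efficientnet_b0' is
# registered in this file; pretrained=False avoids any weight download. Deprecated names
# (e.g. 'tf_efficientnet_b0_ns') are remapped by register_model_deprecations to their
# tagged replacements (e.g. 'tf_efficientnet_b0.ns_jft_in1k') with a deprecation warning.
if __name__ == '__main__':
    import torch
    import timm
    _m = timm.create_model('efficientnet_b0', pretrained=False).eval()
    with torch.no_grad():
        _logits = _m(torch.randn(1, 3, 224, 224))
    print(_logits.shape)  # torch.Size([1, 1000]) for the default 1000-class head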
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/eva.py
""" EVA EVA from https://github.com/baaivision/EVA , paper: https://arxiv.org/abs/2211.07636 @article{EVA, title={EVA: Exploring the Limits of Masked Visual Representation Learning at Scale}, author={Fang, Yuxin and Wang, Wen and Xie, Binhui and Sun, Quan and Wu, Ledell and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2211.07636}, year={2022} } EVA-02: A Visual Representation for Neon Genesis - https://arxiv.org/abs/2303.11331 @article{EVA02, title={EVA-02: A Visual Representation for Neon Genesis}, author={Fang, Yuxin and Sun, Quan and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2303.11331}, year={2023} } This file contains EVA & EVA02 model implementations evolved from BEiT, additional models in vision_transformer.py. Modifications by / Copyright 2023 Ross Wightman, original copyrights below """ # EVA models Copyright (c) 2022 BAAI-Vision # EVA02 models Copyright (c) 2023 BAAI-Vision import math from typing import Callable, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import PatchEmbed, Mlp, GluMlp, SwiGLU, LayerNorm, DropPath, PatchDropout, RotaryEmbeddingCat, \ apply_rot_embed_cat, apply_keep_indices_nlc, trunc_normal_, resample_patch_embed, resample_abs_pos_embed, \ to_2tuple, use_fused_attn from ._builder import build_model_with_cfg from ._registry import generate_default_cfgs, register_model __all__ = ['Eva'] class EvaAttention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, num_heads: int = 8, qkv_bias: bool = True, qkv_fused: bool = True, attn_drop: float = 0., proj_drop: float = 0., attn_head_dim: Optional[int] = None, norm_layer: Optional[Callable] = None, ): """ Args: dim: num_heads: qkv_bias: qkv_fused: attn_drop: proj_drop: attn_head_dim: norm_layer: """ super().__init__() self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() if qkv_fused: self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) self.q_proj = self.k_proj = self.v_proj = None if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = self.k_bias = self.v_bias = None else: self.q_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) self.k_proj = nn.Linear(dim, all_head_dim, bias=False) self.v_proj = nn.Linear(dim, all_head_dim, bias=qkv_bias) self.qkv = None self.q_bias = self.k_bias = self.v_bias = None self.attn_drop = nn.Dropout(attn_drop) self.norm = norm_layer(all_head_dim) if norm_layer is not None else nn.Identity() self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward( self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None, ): B, N, C = x.shape if self.qkv is not None: qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim else: q = self.q_proj(x).reshape(B, N, 
self.num_heads, -1).transpose(1, 2) # B, num_heads, N, C k = self.k_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) v = self.v_proj(x).reshape(B, N, self.num_heads, -1).transpose(1, 2) if rope is not None: q = torch.cat([q[:, :, :1, :], apply_rot_embed_cat(q[:, :, 1:, :], rope)], 2).type_as(v) k = torch.cat([k[:, :, :1, :], apply_rot_embed_cat(k[:, :, 1:, :], rope)], 2).type_as(v) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p, ) else: q = q * self.scale attn = (q @ k.transpose(-2, -1)) attn = attn.softmax(dim=-1) if attn_mask is not None: attn_mask = attn_mask.to(torch.bool) attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf")) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.norm(x) x = self.proj(x) x = self.proj_drop(x) return x class EvaBlock(nn.Module): def __init__( self, dim: int, num_heads: int, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., init_values: Optional[float] = None, act_layer: Callable = nn.GELU, norm_layer: Callable = LayerNorm, attn_head_dim: Optional[int] = None, ): """ Args: dim: num_heads: qkv_bias: qkv_fused: mlp_ratio: swiglu_mlp: scale_mlp: scale_attn_inner: proj_drop: attn_drop: drop_path: init_values: act_layer: norm_layer: attn_head_dim: """ super().__init__() self.norm1 = norm_layer(dim) self.attn = EvaAttention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, attn_drop=attn_drop, proj_drop=proj_drop, attn_head_dim=attn_head_dim, norm_layer=norm_layer if scale_attn_inner else None, ) self.gamma_1 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) hidden_features = int(dim * mlp_ratio) if swiglu_mlp: if scale_mlp: # when norm in SwiGLU used, an impl with separate fc for gate & x is used self.mlp = SwiGLU( in_features=dim, hidden_features=hidden_features, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) else: # w/o any extra norm, an impl with packed weights is used, matches existing GluMLP self.mlp = GluMlp( in_features=dim, hidden_features=hidden_features * 2, norm_layer=norm_layer if scale_mlp else None, act_layer=nn.SiLU, gate_last=False, drop=proj_drop, ) else: self.mlp = Mlp( in_features=dim, hidden_features=hidden_features, act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) self.gamma_2 = nn.Parameter(init_values * torch.ones(dim)) if init_values is not None else None self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None): if self.gamma_1 is None: x = x + self.drop_path1(self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) x = x + self.drop_path2(self.mlp(self.norm2(x))) else: x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), rope=rope, attn_mask=attn_mask)) x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x))) return x class EvaBlockPostNorm(nn.Module): """ EVA block w/ post-norm and support for swiglu, MLP norm scale, ROPE. 
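Unlike the pre-norm EvaBlock above, normalization here is applied to the attention / MLP output before the residual add: x = x + drop_path(norm(f(x))).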
""" def __init__( self, dim: int, num_heads: int, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., init_values: Optional[float] = None, # ignore for post-norm act_layer: Callable = nn.GELU, norm_layer: Callable = nn.LayerNorm, attn_head_dim: Optional[int] = None, ): """ Args: dim: num_heads: qkv_bias: qkv_fused: mlp_ratio: swiglu_mlp: scale_mlp: scale_attn_inner: proj_drop: attn_drop: drop_path: init_values: act_layer: norm_layer: attn_head_dim: """ super().__init__() self.attn = EvaAttention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, attn_drop=attn_drop, proj_drop=proj_drop, attn_head_dim=attn_head_dim, norm_layer=norm_layer if scale_attn_inner else None, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() hidden_features = int(dim * mlp_ratio) if swiglu_mlp: if scale_mlp: # when norm in SwiGLU used, an impl with separate fc for gate & x is used self.mlp = SwiGLU( in_features=dim, hidden_features=hidden_features, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) else: # w/o any extra norm, an impl with packed fc1 weights is used, matches existing GluMLP self.mlp = GluMlp( in_features=dim, hidden_features=hidden_features * 2, norm_layer=norm_layer if scale_mlp else None, act_layer=nn.SiLU, gate_last=False, drop=proj_drop, ) else: self.mlp = Mlp( in_features=dim, hidden_features=hidden_features, act_layer=act_layer, norm_layer=norm_layer if scale_mlp else None, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x, rope: Optional[torch.Tensor] = None, attn_mask: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.norm1(self.attn(x, rope=rope, attn_mask=attn_mask))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class Eva(nn.Module): """ Eva Vision Transformer w/ Abs & Rotary Pos Embed This class implements the EVA and EVA02 models that were based on the BEiT ViT variant * EVA - abs pos embed, global avg pool * EVA02 - abs + rope pos embed, global avg pool, SwiGLU, scale Norm in MLP (ala normformer) """ def __init__( self, img_size: Union[int, Tuple[int, int]] = 224, patch_size: Union[int, Tuple[int, int]] = 16, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 768, depth: int = 12, num_heads: int = 12, qkv_bias: bool = True, qkv_fused: bool = True, mlp_ratio: float = 4., swiglu_mlp: bool = False, scale_mlp: bool = False, scale_attn_inner: bool = False, drop_rate: float = 0., pos_drop_rate: float = 0., patch_drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., norm_layer: Callable = LayerNorm, init_values: Optional[float] = None, class_token: bool = True, use_abs_pos_emb: bool = True, use_rot_pos_emb: bool = False, use_post_norm: bool = False, ref_feat_shape: Optional[Union[Tuple[int, int], int]] = None, head_init_scale: float = 0.001, ): """ Args: img_size: patch_size: in_chans: num_classes: global_pool: embed_dim: depth: num_heads: qkv_bias: qkv_fused: mlp_ratio: swiglu_mlp: scale_mlp: scale_attn_inner: drop_rate: pos_drop_rate: proj_drop_rate: attn_drop_rate: drop_path_rate: norm_layer: init_values: class_token: use_abs_pos_emb: use_rot_pos_emb: use_post_norm: ref_feat_shape: head_init_scale: """ super().__init__() self.num_classes = 
num_classes self.global_pool = global_pool self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_prefix_tokens = 1 if class_token else 0 self.grad_checkpointing = False self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None self.pos_embed = nn.Parameter( torch.zeros(1, num_patches + self.num_prefix_tokens, embed_dim)) if use_abs_pos_emb else None self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout( patch_drop_rate, num_prefix_tokens=self.num_prefix_tokens, return_indices=True, ) else: self.patch_drop = None if use_rot_pos_emb: ref_feat_shape = to_2tuple(ref_feat_shape) if ref_feat_shape is not None else None self.rope = RotaryEmbeddingCat( embed_dim // num_heads, in_pixels=False, feat_shape=self.patch_embed.grid_size, ref_feat_shape=ref_feat_shape, ) else: self.rope = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule block_fn = EvaBlockPostNorm if use_post_norm else EvaBlock self.blocks = nn.ModuleList([ block_fn( dim=embed_dim, num_heads=num_heads, qkv_bias=qkv_bias, qkv_fused=qkv_fused, mlp_ratio=mlp_ratio, swiglu_mlp=swiglu_mlp, scale_mlp=scale_mlp, scale_attn_inner=scale_attn_inner, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, ) for i in range(depth)]) use_fc_norm = self.global_pool == 'avg' self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.fix_init_weight() if isinstance(self.head, nn.Linear): trunc_normal_(self.head.weight, std=.02) self.head.weight.data.mul_(head_init_scale) self.head.bias.data.mul_(head_init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.zeros_(m.bias) @torch.jit.ignore def no_weight_decay(self): nwd = {'pos_embed', 'cls_token'} return nwd @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))], ) return matcher @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) # apply abs position embedding if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) # 
obtain shared rotary position embedding and apply patch dropout rot_pos_embed = self.rope.get_embed() if self.rope is not None else None if self.patch_drop is not None: x, keep_indices = self.patch_drop(x) if rot_pos_embed is not None and keep_indices is not None: rot_pos_embed = apply_keep_indices_nlc(x, rot_pos_embed, keep_indices) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, rope=rot_pos_embed) else: x = blk(x, rope=rot_pos_embed) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn( state_dict, model, interpolation='bicubic', antialias=True, ): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} state_dict = state_dict.get('model_ema', state_dict) state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('module', state_dict) state_dict = state_dict.get('state_dict', state_dict) # prefix for loading OpenCLIP compatible weights if 'visual.trunk.pos_embed' in state_dict: prefix = 'visual.trunk.' elif 'visual.pos_embed' in state_dict: prefix = 'visual.' else: prefix = '' mim_weights = prefix + 'mask_token' in state_dict no_qkv = prefix + 'blocks.0.attn.q_proj.weight' in state_dict len_prefix = len(prefix) for k, v in state_dict.items(): if prefix: if k.startswith(prefix): k = k[len_prefix:] else: continue if 'rope' in k: # fixed embedding no need to load buffer from checkpoint continue if 'patch_embed.proj.weight' in k: _, _, H, W = model.patch_embed.proj.weight.shape if v.shape[-1] != W or v.shape[-2] != H: v = resample_patch_embed( v, (H, W), interpolation=interpolation, antialias=antialias, verbose=True, ) elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: # To resize pos embedding when using model at different size from pretrained weights num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) v = resample_abs_pos_embed( v, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) k = k.replace('mlp.ffn_ln', 'mlp.norm') k = k.replace('attn.inner_attn_ln', 'attn.norm') k = k.replace('mlp.w12', 'mlp.fc1') k = k.replace('mlp.w1', 'mlp.fc1_g') k = k.replace('mlp.w2', 'mlp.fc1_x') k = k.replace('mlp.w3', 'mlp.fc2') if no_qkv: k = k.replace('q_bias', 'q_proj.bias') k = k.replace('v_bias', 'v_proj.bias') if mim_weights and k in ('mask_token', 'lm_head.weight', 'lm_head.bias', 'norm.weight', 'norm.bias'): if k == 'norm.weight' or k == 'norm.bias': # try moving norm -> fc norm on fine-tune, probably a better starting point than new init k = k.replace('norm', 'fc_norm') else: # skip pretrain mask token & head weights continue out_dict[k] = v return out_dict def _create_eva(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Eva models.') model = build_model_with_cfg( Eva, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 
'fixed_input_size': True, 'mean': OPENAI_CLIP_MEAN, 'std': OPENAI_CLIP_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ # EVA 01 CLIP fine-tuned on imagenet-1k 'eva_giant_patch14_224.clip_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz224_ftcls_89p1.pt', hf_hub_id='timm/', ), 'eva_giant_patch14_336.clip_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_clip_vis_enc_sz336_ftcls_89p4.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), # MIM EVA 01 pretrain, ft on in22k -> in1k 'eva_giant_patch14_336.m30m_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_336px_psz14_ema_89p6.pt', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_giant_patch14_560.m30m_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_21k_1k_560px_psz14_ema_89p7.pt', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 560, 560), crop_pct=1.0, crop_mode='squash'), # in22k or m38m MIM pretrain w/ intermediate in22k fine-tune and final in1k fine-tune 'eva02_base_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_B_pt_in21k_medft_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), 'eva02_large_patch14_448.mim_in22k_ft_in22k_in1k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_in21k_medft_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), 'eva02_large_patch14_448.mim_m38m_ft_in22k_in1k': _cfg( hf_hub_id='timm/', #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k_to_in1k/eva02_L_pt_m38m_medft_in21k_ft_in1k_p14.pt', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', ), # in22k or m3m MIM pretrain w/ in1k fine-tune 'eva02_tiny_patch14_336.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_Ti_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, ), 'eva02_small_patch14_336.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_S_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 336, 336), crop_pct=1.0, ), 'eva02_base_patch14_448.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_B_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), 'eva02_large_patch14_448.mim_in22k_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_in21k_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), 'eva02_large_patch14_448.mim_m38m_ft_in1k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in1k/eva02_L_pt_m38m_ft_in1k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, ), # in22k or m3m MIM pretrain w/ in22k fine-tune 'eva02_base_patch14_448.mim_in22k_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_B_pt_in21k_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), 'eva02_large_patch14_448.mim_in22k_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_in21k_medft_in21k_p14.pt', 
hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), 'eva02_large_patch14_448.mim_m38m_ft_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/cls/in21k/eva02_L_pt_m38m_medft_in21k_p14.pt', hf_hub_id='timm/', input_size=(3, 448, 448), crop_pct=1.0, crop_mode='squash', num_classes=21841, ), # in22k or m38m MIM pretrain 'eva02_tiny_patch14_224.mim_in22k': _cfg( # hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_Ti_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_small_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_S_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_base_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_B_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_large_patch14_224.mim_in22k': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_in21k_p14.pt', hf_hub_id='timm/', num_classes=0, ), 'eva02_large_patch14_224.mim_m38m': _cfg( #hf_hub_id='Yuxin-CV/EVA-02', hf_hub_filename='eva02/pt/eva02_L_pt_m38m_p14.pt', hf_hub_id='timm/', num_classes=0, ), # EVA01 and EVA02 CLIP image towers 'eva_giant_patch14_clip_224.laion400m': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', hf_hub_id='timm/eva_giant_patch14_clip_224.laion400m_s11b_b41k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, ), 'eva_giant_patch14_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA01_CLIP_g_14_plus_psz14_s11B.pt', hf_hub_id='timm/eva_giant_patch14_plus_clip_224.merged2b_s11b_b114k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, ), 'eva02_base_patch16_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', hf_hub_id='timm/eva02_base_patch16_clip_224.merged2b_s8b_b131k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=512, ), 'eva02_large_patch14_clip_224.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', hf_hub_id='timm/eva02_large_patch14_clip_224.merged2b_s4b_b131k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=768, ), 'eva02_large_patch14_clip_336.merged2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_L_psz14_s4B.pt', hf_hub_id='timm/eva02_large_patch14_clip_336.merged2b_s6b_b61k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', input_size=(3, 336, 336), crop_pct=1.0, num_classes=768, ), 'eva02_enormous_patch14_clip_224.laion2b': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', hf_hub_id='timm/eva02_enormous_patch14_clip_224.laion2b_s4b_b115k', # float16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, ), 'eva02_enormous_patch14_clip_224.laion2b_plus': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_CLIP_E_psz14_plus_s9B.pt', hf_hub_id='timm/eva02_enormous_patch14_plus_clip_224.laion2b_s9b_b144k', # bfloat16 weights hf_hub_filename='open_clip_pytorch_model.bin', num_classes=1024, ), 'eva02_enormous_patch14_clip_224.pretrain': _cfg( # hf_hub_id='QuanSun/EVA-CLIP', hf_hub_filename='EVA02_E_psz14.pt', num_classes=0, ), }) @register_model def eva_giant_patch14_224(pretrained=False, **kwargs) -> Eva: """ EVA-g model https://arxiv.org/abs/2211.07636 """ model_args = dict(patch_size=14, 
embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_336(pretrained=False, **kwargs) -> Eva: """ EVA-g model https://arxiv.org/abs/2211.07636 """ model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_560(pretrained=False, **kwargs) -> Eva: """ EVA-g model https://arxiv.org/abs/2211.07636 """ model_args = dict(patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408) model = _create_eva('eva_giant_patch14_560', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_tiny_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_tiny_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_small_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_small_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_base_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_224(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=224, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_tiny_patch14_336(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=336, patch_size=14, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_tiny_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_small_patch14_336(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=336, patch_size=14, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_small_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch14_448(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=448, patch_size=14, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_base_patch14_448', pretrained=pretrained, **dict(model_args, 
**kwargs)) return model @register_model def eva02_large_patch14_448(pretrained=False, **kwargs) -> Eva: model_args = dict( img_size=448, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 ) model = _create_eva('eva02_large_patch14_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_giant_patch14_clip_224(pretrained=False, **kwargs) -> Eva: """ EVA-g CLIP model (only difference from non-CLIP is the pooling) """ model_args = dict( patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=6144 / 1408, global_pool=kwargs.pop('global_pool', 'token')) model = _create_eva('eva_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_base_patch16_clip_224(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_base """ model_args = dict( img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_fused=False, mlp_ratio=4 * 2 / 3, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_base_patch16_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_clip_224(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """ model_args = dict( img_size=224, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_large_patch14_clip_336(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that adds additional attn scale layernorm to eva02_large """ model_args = dict( img_size=336, patch_size=14, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4 * 2 / 3, qkv_fused=False, swiglu_mlp=True, scale_mlp=True, scale_attn_inner=True, use_rot_pos_emb=True, ref_feat_shape=(16, 16), # 224/14 global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva02_enormous_patch14_clip_224(pretrained=False, **kwargs) -> Eva: """ A EVA-CLIP specific variant that uses residual post-norm in blocks """ model_args = dict( img_size=224, patch_size=14, embed_dim=1792, depth=64, num_heads=16, mlp_ratio=15360 / 1792, use_post_norm=True, global_pool=kwargs.pop('global_pool', 'token'), ) model = _create_eva('eva02_enormous_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model
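# Usage sketch (illustrative addition, not part of the original timm source): build a small
# EVA02 variant registered above and run a forward pass. When pretrained weights are
# requested, they are remapped through checkpoint_filter_fn (e.g. mlp.w12 -> mlp.fc1)
# before loading; with pretrained=False that path is not exercised.
if __name__ == '__main__':
    import torch
    import timm
    _m = timm.create_model('eva02_tiny_patch14_224', pretrained=False).eval()
    with torch.no_grad():
        _logits = _m(torch.randn(1, 3, 224, 224))
    print(_logits.shape)  # torch.Size([1, 1000])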
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/factory.py
from ._factory import * import warnings warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
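# Behavior sketch (illustrative addition, not part of the original source): this deprecated
# module path still re-exports everything from timm.models._factory, but its first import
# emits a DeprecationWarning, which is hidden by default and can be surfaced with a filter:
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter('always')
#       import timm.models.factory  # noqa: F401  (warning fires on first import only)
#   assert any(issubclass(w.category, DeprecationWarning) for w in caught)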
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/features.py
from ._features import * import warnings warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/focalnet.py
""" FocalNet As described in `Focal Modulation Networks` - https://arxiv.org/abs/2203.11926 Significant modifications and refactoring from the original impl at https://github.com/microsoft/FocalNet This impl is/has: * fully convolutional, NCHW tensor layout throughout, seemed to have minimal performance impact but more flexible * re-ordered downsample / layer so that striding always at beginning of layer (stage) * no input size constraints or input resolution/H/W tracking through the model * torchscript fixed and a number of quirks cleaned up * feature extraction support via `features_only=True` """ # -------------------------------------------------------- # FocalNets -- Focal Modulation Networks # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Jianwei Yang (jianwyan@microsoft.com) # -------------------------------------------------------- from functools import partial from typing import Callable, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, LayerNorm2d, trunc_normal_, ClassifierHead, NormMlpClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply from ._registry import generate_default_cfgs, register_model __all__ = ['FocalNet'] class FocalModulation(nn.Module): def __init__( self, dim: int, focal_window, focal_level: int, focal_factor: int = 2, bias: bool = True, use_post_norm: bool = False, normalize_modulator: bool = False, proj_drop: float = 0., norm_layer: Callable = LayerNorm2d, ): super().__init__() self.dim = dim self.focal_window = focal_window self.focal_level = focal_level self.focal_factor = focal_factor self.use_post_norm = use_post_norm self.normalize_modulator = normalize_modulator self.input_split = [dim, dim, self.focal_level + 1] self.f = nn.Conv2d(dim, 2 * dim + (self.focal_level + 1), kernel_size=1, bias=bias) self.h = nn.Conv2d(dim, dim, kernel_size=1, bias=bias) self.act = nn.GELU() self.proj = nn.Conv2d(dim, dim, kernel_size=1) self.proj_drop = nn.Dropout(proj_drop) self.focal_layers = nn.ModuleList() self.kernel_sizes = [] for k in range(self.focal_level): kernel_size = self.focal_factor * k + self.focal_window self.focal_layers.append(nn.Sequential( nn.Conv2d(dim, dim, kernel_size=kernel_size, groups=dim, padding=kernel_size // 2, bias=False), nn.GELU(), )) self.kernel_sizes.append(kernel_size) self.norm = norm_layer(dim) if self.use_post_norm else nn.Identity() def forward(self, x): # pre linear projection x = self.f(x) q, ctx, gates = torch.split(x, self.input_split, 1) # context aggreation ctx_all = 0 for l, focal_layer in enumerate(self.focal_layers): ctx = focal_layer(ctx) ctx_all = ctx_all + ctx * gates[:, l:l + 1] ctx_global = self.act(ctx.mean((2, 3), keepdim=True)) ctx_all = ctx_all + ctx_global * gates[:, self.focal_level:] # normalize context if self.normalize_modulator: ctx_all = ctx_all / (self.focal_level + 1) # focal modulation x_out = q * self.h(ctx_all) x_out = self.norm(x_out) # post linear projection x_out = self.proj(x_out) x_out = self.proj_drop(x_out) return x_out class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class FocalNetBlock(nn.Module): """ Focal 
Modulation Network Block. """ def __init__( self, dim: int, mlp_ratio: float = 4., focal_level: int = 1, focal_window: int = 3, use_post_norm: bool = False, use_post_norm_in_modulation: bool = False, normalize_modulator: bool = False, layerscale_value: float = 1e-4, proj_drop: float = 0., drop_path: float = 0., act_layer: Callable = nn.GELU, norm_layer: Callable = LayerNorm2d, ): """ Args: dim: Number of input channels. mlp_ratio: Ratio of mlp hidden dim to embedding dim. focal_level: Number of focal levels. focal_window: Focal window size at first focal level. use_post_norm: Whether to use layer norm after modulation. use_post_norm_in_modulation: Whether to use layer norm in modulation. normalize_modulator: Whether to normalize the modulator. layerscale_value: Initial layerscale value. proj_drop: Dropout rate. drop_path: Stochastic depth rate. act_layer: Activation layer. norm_layer: Normalization layer. """ super().__init__() self.dim = dim self.mlp_ratio = mlp_ratio self.focal_window = focal_window self.focal_level = focal_level self.use_post_norm = use_post_norm self.norm1 = norm_layer(dim) if not use_post_norm else nn.Identity() self.modulation = FocalModulation( dim, focal_window=focal_window, focal_level=self.focal_level, use_post_norm=use_post_norm_in_modulation, normalize_modulator=normalize_modulator, proj_drop=proj_drop, norm_layer=norm_layer, ) self.norm1_post = norm_layer(dim) if use_post_norm else nn.Identity() self.ls1 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) if not use_post_norm else nn.Identity() self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, use_conv=True, ) self.norm2_post = norm_layer(dim) if use_post_norm else nn.Identity() self.ls2 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x # Focal Modulation x = self.norm1(x) x = self.modulation(x) x = self.norm1_post(x) x = shortcut + self.drop_path1(self.ls1(x)) # FFN x = x + self.drop_path2(self.ls2(self.norm2_post(self.mlp(self.norm2(x))))) return x class FocalNetStage(nn.Module): """ A basic Focal Transformer layer for one stage. """ def __init__( self, dim: int, out_dim: int, depth: int, mlp_ratio: float = 4., downsample: bool = True, focal_level: int = 1, focal_window: int = 1, use_overlap_down: bool = False, use_post_norm: bool = False, use_post_norm_in_modulation: bool = False, normalize_modulator: bool = False, layerscale_value: float = 1e-4, proj_drop: float = 0., drop_path: float = 0., norm_layer: Callable = LayerNorm2d, ): """ Args: dim: Number of input channels. out_dim: Number of output channels. depth: Number of blocks. mlp_ratio: Ratio of mlp hidden dim to embedding dim. downsample: Downsample layer at the start of the stage. focal_level: Number of focal levels. focal_window: Focal window size at first focal level. use_overlap_down: Use overlapping convolution in the downsample layer. use_post_norm: Whether to use layer norm after modulation. use_post_norm_in_modulation: Whether to use layer norm in modulation. normalize_modulator: Whether to normalize the modulator. layerscale_value: Initial layerscale value. proj_drop: Dropout rate for projections. drop_path: Stochastic depth rate. norm_layer: Normalization layer.
""" super().__init__() self.dim = dim self.depth = depth self.grad_checkpointing = False if downsample: self.downsample = Downsample( in_chs=dim, out_chs=out_dim, stride=2, overlap=use_overlap_down, norm_layer=norm_layer, ) else: self.downsample = nn.Identity() # build blocks self.blocks = nn.ModuleList([ FocalNetBlock( dim=out_dim, mlp_ratio=mlp_ratio, focal_level=focal_level, focal_window=focal_window, use_post_norm=use_post_norm, use_post_norm_in_modulation=use_post_norm_in_modulation, normalize_modulator=normalize_modulator, layerscale_value=layerscale_value, proj_drop=proj_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer, ) for i in range(depth)]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x): x = self.downsample(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint.checkpoint(blk, x) else: x = blk(x) return x class Downsample(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 4, overlap: bool = False, norm_layer: Optional[Callable] = None, ): """ Args: in_chs: Number of input image channels. out_chs: Number of linear projection output channels. stride: Downsample stride. overlap: Use overlapping convolutions if True. norm_layer: Normalization layer. """ super().__init__() self.stride = stride padding = 0 kernel_size = stride if overlap: assert stride in (2, 4) if stride == 4: kernel_size, padding = 7, 2 elif stride == 2: kernel_size, padding = 3, 1 self.proj = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding) self.norm = norm_layer(out_chs) if norm_layer is not None else nn.Identity() def forward(self, x): x = self.proj(x) x = self.norm(x) return x class FocalNet(nn.Module): """" Focal Modulation Networks (FocalNets) """ def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 96, depths: Tuple[int, ...] = (2, 2, 6, 2), mlp_ratio: float = 4., focal_levels: Tuple[int, ...] = (2, 2, 2, 2), focal_windows: Tuple[int, ...] = (3, 3, 3, 3), use_overlap_down: bool = False, use_post_norm: bool = False, use_post_norm_in_modulation: bool = False, normalize_modulator: bool = False, head_hidden_size: Optional[int] = None, head_init_scale: float = 1.0, layerscale_value: Optional[float] = None, drop_rate: bool = 0., proj_drop_rate: bool = 0., drop_path_rate: bool = 0.1, norm_layer: Callable = partial(LayerNorm2d, eps=1e-5), ): """ Args: in_chans: Number of input image channels. num_classes: Number of classes for classification head. embed_dim: Patch embedding dimension. depths: Depth of each Focal Transformer layer. mlp_ratio: Ratio of mlp hidden dim to embedding dim. focal_levels: How many focal levels at all stages. Note that this excludes the finest-grain level. focal_windows: The focal window size at all stages. use_overlap_down: Whether to use convolutional embedding. use_post_norm: Whether to use layernorm after modulation (it helps stablize training of large models) layerscale_value: Value for layer scale. drop_rate: Dropout rate. drop_path_rate: Stochastic depth rate. norm_layer: Normalization layer. 
""" super().__init__() self.num_layers = len(depths) embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)] self.num_classes = num_classes self.embed_dim = embed_dim self.num_features = embed_dim[-1] self.feature_info = [] self.stem = Downsample( in_chs=in_chans, out_chs=embed_dim[0], overlap=use_overlap_down, norm_layer=norm_layer, ) in_dim = embed_dim[0] dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule layers = [] for i_layer in range(self.num_layers): out_dim = embed_dim[i_layer] layer = FocalNetStage( dim=in_dim, out_dim=out_dim, depth=depths[i_layer], mlp_ratio=mlp_ratio, downsample=i_layer > 0, focal_level=focal_levels[i_layer], focal_window=focal_windows[i_layer], use_overlap_down=use_overlap_down, use_post_norm=use_post_norm, use_post_norm_in_modulation=use_post_norm_in_modulation, normalize_modulator=normalize_modulator, layerscale_value=layerscale_value, proj_drop=proj_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, ) in_dim = out_dim layers += [layer] self.feature_info += [dict(num_chs=out_dim, reduction=4 * 2 ** i_layer, module=f'layers.{i_layer}')] self.layers = nn.Sequential(*layers) if head_hidden_size: self.norm = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=drop_rate, norm_layer=norm_layer, ) else: self.norm = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate ) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def no_weight_decay(self): return {''} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=[ (r'^layers\.(\d+)', None), (r'^norm', (99999,)) ] if coarse else [ (r'^layers\.(\d+).downsample', (0,)), (r'^layers\.(\d+)\.\w+\.(\d+)', None), (r'^norm', (99999,)), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable for l in self.layers: l.set_grad_checkpointing(enable=enable) @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.layers(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) if name and 'head.fc' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.proj', 'classifier': 'head.fc', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ "focalnet_tiny_srf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_small_srf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_base_srf.ms_in1k": _cfg( hf_hub_id='timm/'), 
"focalnet_tiny_lrf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_small_lrf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_base_lrf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_large_fl3.ms_in22k": _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), "focalnet_large_fl4.ms_in22k": _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), "focalnet_xlarge_fl3.ms_in22k": _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), "focalnet_xlarge_fl4.ms_in22k": _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), "focalnet_huge_fl3.ms_in22k": _cfg( hf_hub_id='timm/', num_classes=21842), "focalnet_huge_fl4.ms_in22k": _cfg( hf_hub_id='timm/', num_classes=0), }) def checkpoint_filter_fn(state_dict, model: FocalNet): state_dict = state_dict.get('model', state_dict) if 'stem.proj.weight' in state_dict: return state_dict import re out_dict = {} dest_dict = model.state_dict() for k, v in state_dict.items(): k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k) k = k.replace('patch_embed', 'stem') k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) if 'norm' in k and k not in dest_dict: k = re.sub(r'norm([0-9])', r'norm\1_post', k) k = k.replace('ln.', 'norm.') k = k.replace('head', 'head.fc') if k in dest_dict and dest_dict[k].numel() == v.numel() and dest_dict[k].shape != v.shape: v = v.reshape(dest_dict[k].shape) out_dict[k] = v return out_dict def _create_focalnet(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( FocalNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def focalnet_tiny_srf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, **kwargs) return _create_focalnet('focalnet_tiny_srf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_small_srf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, **kwargs) return _create_focalnet('focalnet_small_srf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_base_srf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, **kwargs) return _create_focalnet('focalnet_base_srf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_tiny_lrf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs) return _create_focalnet('focalnet_tiny_lrf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_small_lrf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs) return _create_focalnet('focalnet_small_lrf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_base_lrf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, focal_levels=[3, 3, 3, 3], **kwargs) return _create_focalnet('focalnet_base_lrf', pretrained=pretrained, **model_kwargs) # FocalNet large+ models @register_model def focalnet_large_fl3(pretrained=False, **kwargs) -> 
FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4, use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_large_fl3', pretrained=pretrained, **model_kwargs) @register_model def focalnet_large_fl4(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[4, 4, 4, 4], use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_large_fl4', pretrained=pretrained, **model_kwargs) @register_model def focalnet_xlarge_fl3(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4, use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_xlarge_fl3', pretrained=pretrained, **model_kwargs) @register_model def focalnet_xlarge_fl4(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[4, 4, 4, 4], use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_xlarge_fl4', pretrained=pretrained, **model_kwargs) @register_model def focalnet_huge_fl3(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[3, 3, 3, 3], focal_windows=[3] * 4, use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_huge_fl3', pretrained=pretrained, **model_kwargs) @register_model def focalnet_huge_fl4(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[4, 4, 4, 4], use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_huge_fl4', pretrained=pretrained, **model_kwargs)
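# Usage sketch (an illustrative addition, not part of the upstream file). The module
# docstring above notes feature extraction support via `features_only=True`; this is
# a minimal example of both classification and per-stage feature use, assuming `timm`
# is installed and the entrypoints registered above are importable. The 224x224 input
# size is an assumption matching the default cfg.
if __name__ == '__main__':
    import timm
    x = torch.randn(1, 3, 224, 224)
    model = timm.create_model('focalnet_tiny_srf')
    print(model(x).shape)  # classification logits, e.g. torch.Size([1, 1000])
    feat_model = timm.create_model('focalnet_tiny_srf', features_only=True)
    print([f.shape for f in feat_model(x)])  # one NCHW map per stage, reductions 4/8/16/32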
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/fx_features.py
from ._features_fx import * import warnings warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/gcvit.py
""" Global Context ViT From scratch implementation of GCViT in the style of timm swin_transformer_v2_cr.py Global Context Vision Transformers -https://arxiv.org/abs/2206.09959 @article{hatamizadeh2022global, title={Global Context Vision Transformers}, author={Hatamizadeh, Ali and Yin, Hongxu and Kautz, Jan and Molchanov, Pavlo}, journal={arXiv preprint arXiv:2206.09959}, year={2022} } Free of any code related to NVIDIA GCVit impl at https://github.com/NVlabs/GCVit. The license for this code release is Apache 2.0 with no commercial restrictions. However, weight files adapted from NVIDIA GCVit impl ARE under a non-commercial share-alike license (https://creativecommons.org/licenses/by-nc-sa/4.0/) until I have a chance to train new ones... Hacked together by / Copyright 2022, Ross Wightman """ import math from functools import partial from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, to_2tuple, to_ntuple, Mlp, ClassifierHead, LayerNorm2d, \ get_attn, get_act_layer, get_norm_layer, RelPosBias, _assert from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import named_apply from ._registry import register_model, generate_default_cfgs __all__ = ['GlobalContextVit'] class MbConvBlock(nn.Module): """ A depthwise separable / fused mbconv style residual block with SE, `no norm. """ def __init__( self, in_chs, out_chs=None, expand_ratio=1.0, attn_layer='se', bias=False, act_layer=nn.GELU, ): super().__init__() attn_kwargs = dict(act_layer=act_layer) if isinstance(attn_layer, str) and attn_layer == 'se' or attn_layer == 'eca': attn_kwargs['rd_ratio'] = 0.25 attn_kwargs['bias'] = False attn_layer = get_attn(attn_layer) out_chs = out_chs or in_chs mid_chs = int(expand_ratio * in_chs) self.conv_dw = nn.Conv2d(in_chs, mid_chs, 3, 1, 1, groups=in_chs, bias=bias) self.act = act_layer() self.se = attn_layer(mid_chs, **attn_kwargs) self.conv_pw = nn.Conv2d(mid_chs, out_chs, 1, 1, 0, bias=bias) def forward(self, x): shortcut = x x = self.conv_dw(x) x = self.act(x) x = self.se(x) x = self.conv_pw(x) x = x + shortcut return x class Downsample2d(nn.Module): def __init__( self, dim, dim_out=None, reduction='conv', act_layer=nn.GELU, norm_layer=LayerNorm2d, # NOTE in NCHW ): super().__init__() dim_out = dim_out or dim self.norm1 = norm_layer(dim) if norm_layer is not None else nn.Identity() self.conv_block = MbConvBlock(dim, act_layer=act_layer) assert reduction in ('conv', 'max', 'avg') if reduction == 'conv': self.reduction = nn.Conv2d(dim, dim_out, 3, 2, 1, bias=False) elif reduction == 'max': assert dim == dim_out self.reduction = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) else: assert dim == dim_out self.reduction = nn.AvgPool2d(kernel_size=2) self.norm2 = norm_layer(dim_out) if norm_layer is not None else nn.Identity() def forward(self, x): x = self.norm1(x) x = self.conv_block(x) x = self.reduction(x) x = self.norm2(x) return x class FeatureBlock(nn.Module): def __init__( self, dim, levels=0, reduction='max', act_layer=nn.GELU, ): super().__init__() reductions = levels levels = max(1, levels) if reduction == 'avg': pool_fn = partial(nn.AvgPool2d, kernel_size=2) else: pool_fn = partial(nn.MaxPool2d, kernel_size=3, stride=2, padding=1) self.blocks = nn.Sequential() for i in range(levels): self.blocks.add_module(f'conv{i+1}', MbConvBlock(dim, act_layer=act_layer)) 
if reductions: self.blocks.add_module(f'pool{i+1}', pool_fn()) reductions -= 1 def forward(self, x): return self.blocks(x) class Stem(nn.Module): def __init__( self, in_chs: int = 3, out_chs: int = 96, act_layer: Callable = nn.GELU, norm_layer: Callable = LayerNorm2d, # NOTE stem in NCHW ): super().__init__() self.conv1 = nn.Conv2d(in_chs, out_chs, kernel_size=3, stride=2, padding=1) self.down = Downsample2d(out_chs, act_layer=act_layer, norm_layer=norm_layer) def forward(self, x): x = self.conv1(x) x = self.down(x) return x class WindowAttentionGlobal(nn.Module): def __init__( self, dim: int, num_heads: int, window_size: Tuple[int, int], use_global: bool = True, qkv_bias: bool = True, attn_drop: float = 0., proj_drop: float = 0., ): super().__init__() window_size = to_2tuple(window_size) self.window_size = window_size self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.use_global = use_global self.rel_pos = RelPosBias(window_size=window_size, num_heads=num_heads) if self.use_global: self.qkv = nn.Linear(dim, dim * 2, bias=qkv_bias) else: self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, q_global: Optional[torch.Tensor] = None): B, N, C = x.shape if self.use_global and q_global is not None: _assert(x.shape[-1] == q_global.shape[-1], 'x and q_global seq lengths should be equal') kv = self.qkv(x) kv = kv.reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) k, v = kv.unbind(0) q = q_global.repeat(B // q_global.shape[0], 1, 1, 1) q = q.reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3) else: qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) q = q * self.scale attn = q @ k.transpose(-2, -1).contiguous() # NOTE contiguous() fixes an odd jit bug in PyTorch 2.0 attn = self.rel_pos(attn) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x def window_partition(x, window_size: Tuple[int, int]): B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class GlobalContextVitBlock(nn.Module): def __init__( self, dim: int, feat_size: Tuple[int, int], num_heads: int, window_size: int = 7, mlp_ratio: float = 4., use_global: bool = True, qkv_bias: bool = True, layer_scale: Optional[float] = None, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., attn_layer: Callable = WindowAttentionGlobal, act_layer: Callable = nn.GELU, norm_layer: Callable = nn.LayerNorm, ): super().__init__() feat_size = to_2tuple(feat_size) window_size = to_2tuple(window_size) 
self.window_size = window_size self.num_windows = int((feat_size[0] // window_size[0]) * (feat_size[1] // window_size[1])) self.norm1 = norm_layer(dim) self.attn = attn_layer( dim, num_heads=num_heads, window_size=window_size, use_global=use_global, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.ls1 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.ls2 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def _window_attn(self, x, q_global: Optional[torch.Tensor] = None): B, H, W, C = x.shape x_win = window_partition(x, self.window_size) x_win = x_win.view(-1, self.window_size[0] * self.window_size[1], C) attn_win = self.attn(x_win, q_global) x = window_reverse(attn_win, self.window_size, (H, W)) return x def forward(self, x, q_global: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.ls1(self._window_attn(self.norm1(x), q_global))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class GlobalContextVitStage(nn.Module): def __init__( self, dim, depth: int, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], downsample: bool = True, global_norm: bool = False, stage_norm: bool = False, mlp_ratio: float = 4., qkv_bias: bool = True, layer_scale: Optional[float] = None, proj_drop: float = 0., attn_drop: float = 0., drop_path: Union[List[float], float] = 0.0, act_layer: Callable = nn.GELU, norm_layer: Callable = nn.LayerNorm, norm_layer_cl: Callable = LayerNorm2d, ): super().__init__() if downsample: self.downsample = Downsample2d( dim=dim, dim_out=dim * 2, norm_layer=norm_layer, ) dim = dim * 2 feat_size = (feat_size[0] // 2, feat_size[1] // 2) else: self.downsample = nn.Identity() self.feat_size = feat_size window_size = to_2tuple(window_size) feat_levels = int(math.log2(min(feat_size) / min(window_size))) self.global_block = FeatureBlock(dim, feat_levels) self.global_norm = norm_layer_cl(dim) if global_norm else nn.Identity() self.blocks = nn.ModuleList([ GlobalContextVitBlock( dim=dim, num_heads=num_heads, feat_size=feat_size, window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, use_global=(i % 2 != 0), layer_scale=layer_scale, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, act_layer=act_layer, norm_layer=norm_layer_cl, ) for i in range(depth) ]) self.norm = norm_layer_cl(dim) if stage_norm else nn.Identity() self.dim = dim self.feat_size = feat_size self.grad_checkpointing = False def forward(self, x): # input NCHW, downsample & global block are 2d conv + pooling x = self.downsample(x) global_query = self.global_block(x) # reshape NCHW --> NHWC for transformer blocks x = x.permute(0, 2, 3, 1) global_query = self.global_norm(global_query.permute(0, 2, 3, 1)) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint.checkpoint(blk, x) else: x = blk(x, global_query) x = self.norm(x) x = x.permute(0, 3, 1, 2).contiguous() # back to NCHW return x class GlobalContextVit(nn.Module): def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', img_size: Tuple[int, int] = 224, window_ratio: Tuple[int, ...] 
= (32, 32, 16, 32), window_size: Tuple[int, ...] = None, embed_dim: int = 64, depths: Tuple[int, ...] = (3, 4, 19, 5), num_heads: Tuple[int, ...] = (2, 4, 8, 16), mlp_ratio: float = 3.0, qkv_bias: bool = True, layer_scale: Optional[float] = None, drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., weight_init='', act_layer: str = 'gelu', norm_layer: str = 'layernorm2d', norm_layer_cl: str = 'layernorm', norm_eps: float = 1e-5, ): super().__init__() act_layer = get_act_layer(act_layer) norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps) img_size = to_2tuple(img_size) feat_size = tuple(d // 4 for d in img_size) # stem reduction by 4 self.global_pool = global_pool self.num_classes = num_classes self.drop_rate = drop_rate num_stages = len(depths) self.num_features = int(embed_dim * 2 ** (num_stages - 1)) if window_size is not None: window_size = to_ntuple(num_stages)(window_size) else: assert window_ratio is not None window_size = tuple([(img_size[0] // r, img_size[1] // r) for r in to_ntuple(num_stages)(window_ratio)]) self.stem = Stem( in_chs=in_chans, out_chs=embed_dim, act_layer=act_layer, norm_layer=norm_layer ) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] for i in range(num_stages): last_stage = i == num_stages - 1 stage_scale = 2 ** max(i - 1, 0) stages.append(GlobalContextVitStage( dim=embed_dim * stage_scale, depth=depths[i], num_heads=num_heads[i], feat_size=(feat_size[0] // stage_scale, feat_size[1] // stage_scale), window_size=window_size[i], downsample=i != 0, stage_norm=last_stage, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, layer_scale=layer_scale, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], act_layer=act_layer, norm_layer=norm_layer, norm_layer_cl=norm_layer_cl, )) self.stages = nn.Sequential(*stages) # Classifier head self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) if weight_init: named_apply(partial(self._init_weights, scheme=weight_init), self) def _init_weights(self, module, name, scheme='vit'): # note Conv2d left as default init if scheme == 'vit': if isinstance(module, nn.Linear): nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) else: if isinstance(module, nn.Linear): nn.init.normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) @torch.jit.ignore def no_weight_decay(self): return { k for k, _ in self.named_parameters() if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', # stem and embed blocks=r'^stages\.(\d+)' ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is None: global_pool = self.head.global_pool.pool_type self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.stem(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, 
pre_logits=pre_logits) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def _create_gcvit(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg(GlobalContextVit, variant, pretrained, **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': 'head.fc', 'fixed_input_size': True, **kwargs } default_cfgs = generate_default_cfgs({ 'gcvit_xxtiny.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'), 'gcvit_xtiny.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'), 'gcvit_tiny.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'), 'gcvit_small.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'), 'gcvit_base.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth'), }) @register_model def gcvit_xxtiny(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict( depths=(2, 2, 6, 2), num_heads=(2, 4, 8, 16), **kwargs) return _create_gcvit('gcvit_xxtiny', pretrained=pretrained, **model_kwargs) @register_model def gcvit_xtiny(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict( depths=(3, 4, 6, 5), num_heads=(2, 4, 8, 16), **kwargs) return _create_gcvit('gcvit_xtiny', pretrained=pretrained, **model_kwargs) @register_model def gcvit_tiny(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict( depths=(3, 4, 19, 5), num_heads=(2, 4, 8, 16), **kwargs) return _create_gcvit('gcvit_tiny', pretrained=pretrained, **model_kwargs) @register_model def gcvit_small(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict( depths=(3, 4, 19, 5), num_heads=(3, 6, 12, 24), embed_dim=96, mlp_ratio=2, layer_scale=1e-5, **kwargs) return _create_gcvit('gcvit_small', pretrained=pretrained, **model_kwargs) @register_model def gcvit_base(pretrained=False, **kwargs) -> GlobalContextVit: model_kwargs = dict( depths=(3, 4, 19, 5), num_heads=(4, 8, 16, 32), embed_dim=128, mlp_ratio=2, layer_scale=1e-5, **kwargs) return _create_gcvit('gcvit_base', pretrained=pretrained, **model_kwargs)
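# Usage sketch (an illustrative addition, not part of the upstream file). The cfgs
# above set 'fixed_input_size': True, since window sizes are derived from the image
# size, so this sketch sticks to the 224x224 default; note that `_create_gcvit`
# above rejects `features_only`. The entrypoint name comes from the registrations
# above and no pretrained weights are required.
if __name__ == '__main__':
    import timm
    model = timm.create_model('gcvit_xxtiny')
    print(model(torch.randn(2, 3, 224, 224)).shape)  # e.g. torch.Size([2, 1000])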
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/ghostnet.py
""" An implementation of GhostNet Model as defined in: GhostNet: More Features from Cheap Operations. https://arxiv.org/abs/1911.11907 The train script of the model is similar to that of MobileNetV3 Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch """ import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, Linear, make_divisible from ._builder import build_model_with_cfg from ._efficientnet_blocks import SqueezeExcite, ConvBnAct from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['GhostNet'] _SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) class GhostModule(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True, ): super(GhostModule, self).__init__() self.out_chs = out_chs init_chs = math.ceil(out_chs / ratio) new_chs = init_chs * (ratio - 1) self.primary_conv = nn.Sequential( nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), nn.BatchNorm2d(init_chs), nn.ReLU(inplace=True) if relu else nn.Identity(), ) self.cheap_operation = nn.Sequential( nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False), nn.BatchNorm2d(new_chs), nn.ReLU(inplace=True) if relu else nn.Identity(), ) def forward(self, x): x1 = self.primary_conv(x) x2 = self.cheap_operation(x1) out = torch.cat([x1, x2], dim=1) return out[:, :self.out_chs, :, :] class GhostBottleneck(nn.Module): """ Ghost bottleneck w/ optional SE""" def __init__( self, in_chs, mid_chs, out_chs, dw_kernel_size=3, stride=1, act_layer=nn.ReLU, se_ratio=0., ): super(GhostBottleneck, self).__init__() has_se = se_ratio is not None and se_ratio > 0. 
self.stride = stride # Point-wise expansion self.ghost1 = GhostModule(in_chs, mid_chs, relu=True) # Depth-wise convolution if self.stride > 1: self.conv_dw = nn.Conv2d( mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) self.bn_dw = nn.BatchNorm2d(mid_chs) else: self.conv_dw = None self.bn_dw = None # Squeeze-and-excitation self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None # Point-wise linear projection self.ghost2 = GhostModule(mid_chs, out_chs, relu=False) # shortcut if in_chs == out_chs and self.stride == 1: self.shortcut = nn.Sequential() else: self.shortcut = nn.Sequential( nn.Conv2d( in_chs, in_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), nn.BatchNorm2d(in_chs), nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), nn.BatchNorm2d(out_chs), ) def forward(self, x): shortcut = x # 1st ghost bottleneck x = self.ghost1(x) # Depth-wise convolution if self.conv_dw is not None: x = self.conv_dw(x) x = self.bn_dw(x) # Squeeze-and-excitation if self.se is not None: x = self.se(x) # 2nd ghost bottleneck x = self.ghost2(x) x += self.shortcut(shortcut) return x class GhostNet(nn.Module): def __init__( self, cfgs, num_classes=1000, width=1.0, in_chans=3, output_stride=32, global_pool='avg', drop_rate=0.2, ): super(GhostNet, self).__init__() # setting of inverted residual blocks assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' self.cfgs = cfgs self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] # building first layer stem_chs = make_divisible(16 * width, 4) self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) self.bn1 = nn.BatchNorm2d(stem_chs) self.act1 = nn.ReLU(inplace=True) prev_chs = stem_chs # building inverted residual blocks stages = nn.ModuleList([]) block = GhostBottleneck stage_idx = 0 net_stride = 2 for cfg in self.cfgs: layers = [] s = 1 for k, exp_size, c, se_ratio, s in cfg: out_chs = make_divisible(c * width, 4) mid_chs = make_divisible(exp_size * width, 4) layers.append(block(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio)) prev_chs = out_chs if s > 1: net_stride *= 2 self.feature_info.append(dict( num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) stages.append(nn.Sequential(*layers)) stage_idx += 1 out_chs = make_divisible(exp_size * width, 4) stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) self.pool_dim = prev_chs = out_chs self.blocks = nn.Sequential(*stages) # building last several layers self.num_features = out_chs = 1280 self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) self.act2 = nn.ReLU(inplace=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() # FIXME init @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv_stem|bn1', blocks=[ (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), (r'conv_head', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): 
self.num_classes = num_classes # cannot meaningfully change pooling of efficient head after creation self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(self.pool_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) return x def forward_head(self, x): x = self.global_pool(x) x = self.conv_head(x) x = self.act2(x) x = self.flatten(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) x = self.classifier(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): """ Constructs a GhostNet model """ cfgs = [ # k, t, c, SE, s # stage1 [[3, 16, 16, 0, 1]], # stage2 [[3, 48, 24, 0, 2]], [[3, 72, 24, 0, 1]], # stage3 [[5, 72, 40, 0.25, 2]], [[5, 120, 40, 0.25, 1]], # stage4 [[3, 240, 80, 0, 2]], [[3, 200, 80, 0, 1], [3, 184, 80, 0, 1], [3, 184, 80, 0, 1], [3, 480, 112, 0.25, 1], [3, 672, 112, 0.25, 1] ], # stage5 [[5, 672, 160, 0.25, 2]], [[5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1], [5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1] ] ] model_kwargs = dict( cfgs=cfgs, width=width, **kwargs, ) return build_model_with_cfg( GhostNet, variant, pretrained, feature_cfg=dict(flatten_sequential=True), **model_kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'ghostnet_050.untrained': _cfg(), 'ghostnet_100.in1k': _cfg( url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'), 'ghostnet_130.untrained': _cfg(), }) @register_model def ghostnet_050(pretrained=False, **kwargs) -> GhostNet: """ GhostNet-0.5x """ model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs) return model @register_model def ghostnet_100(pretrained=False, **kwargs) -> GhostNet: """ GhostNet-1.0x """ model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs) return model @register_model def ghostnet_130(pretrained=False, **kwargs) -> GhostNet: """ GhostNet-1.3x """ model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs) return model
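# Usage sketch (an illustrative addition, not part of the upstream file). GhostModule
# above builds `init_chs` channels with a dense conv and the remaining channels with a
# cheap depthwise conv, concatenates the two, then trims to `out_chs`. The shapes
# below are assumptions chosen only to demonstrate that behavior.
if __name__ == '__main__':
    m = GhostModule(in_chs=16, out_chs=32, ratio=2)
    y = m(torch.randn(1, 16, 56, 56))
    print(y.shape)  # torch.Size([1, 32, 56, 56]): half "primary", half "cheap" channels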
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/hardcorenas.py
from functools import partial import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._builder import pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels from ._registry import register_model, generate_default_cfgs from .mobilenetv3 import MobileNetV3, MobileNetV3Features __all__ = []  # model_registry will add each entrypoint fn to this def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs): """Creates a hardcorenas model Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS Paper: https://arxiv.org/abs/2102.11646 """ num_features = 1280 se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) model_kwargs = dict( block_args=decode_arch_def(arch_def), num_features=num_features, stem_size=32, norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_layer=se_layer, **kwargs, ) features_only = False model_cls = MobileNetV3 kwargs_filter = None if model_kwargs.pop('features_only', False): features_only = True kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias') model_cls = MobileNetV3Features model = build_model_with_cfg( model_cls, variant, pretrained, pretrained_strict=not features_only, kwargs_filter=kwargs_filter, **model_kwargs, ) if features_only: model.default_cfg = pretrained_cfg_for_features(model.default_cfg) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'hardcorenas_a.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_b.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_c.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_d.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_e.miil_green_in1k': _cfg(hf_hub_id='timm/'), 'hardcorenas_f.miil_green_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def hardcorenas_a(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_A """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_b(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_B """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained,
variant='hardcorenas_b', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_c(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_C """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'], ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_d(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_D """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25'], ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25', 'ir_r1_k5_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_e(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_E """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'], ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs) return model @register_model def hardcorenas_f(pretrained=False, **kwargs) -> MobileNetV3: """ hardcorenas_F """ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'], ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'], ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25'], ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25'], ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']] model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs) return model
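# Usage sketch (an illustrative addition, not part of the upstream file). The
# arch_def strings above are standard timm block definitions consumed by
# `decode_arch_def`, e.g. 'ir_r1_k5_s2_e3_c24_nre_se0.25' reads as one inverted
# residual block, 5x5 kernel, stride 2, expansion 3, 24 output channels, ReLU
# activation ('nre'), SE ratio 0.25. A minimal forward pass, assuming `timm` is
# installed and the entrypoints above are registered:
if __name__ == '__main__':
    import torch
    import timm
    model = timm.create_model('hardcorenas_a')
    print(model(torch.randn(1, 3, 224, 224)).shape)  # e.g. torch.Size([1, 1000])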
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/helpers.py
from ._builder import * from ._helpers import * from ._manipulate import * from ._prune import * import warnings warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/hrnet.py
""" HRNet Copied from https://github.com/HRNet/HRNet-Image-Classification Original header: Copyright (c) Microsoft Licensed under the MIT License. Written by Bin Xiao (Bin.Xiao@microsoft.com) Modified by Ke Sun (sunk@mail.ustc.edu.cn) """ import logging from typing import List import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._features import FeatureInfo from ._registry import register_model, generate_default_cfgs from .resnet import BasicBlock, Bottleneck # leveraging ResNet block_types w/ additional features like SE __all__ = ['HighResolutionNet', 'HighResolutionNetFeatures'] # model_registry will add each entrypoint fn to this _BN_MOMENTUM = 0.1 _logger = logging.getLogger(__name__) cfg_cls = dict( hrnet_w18_small=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(1,), num_channels=(32,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(16, 32), fuse_method='SUM' ), stage3=dict( num_modules=1, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(16, 32, 64), fuse_method='SUM' ), stage4=dict( num_modules=1, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(16, 32, 64, 128), fuse_method='SUM', ), ), hrnet_w18_small_v2=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(2,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(18, 36), fuse_method='SUM' ), stage3=dict( num_modules=3, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(18, 36, 72), fuse_method='SUM' ), stage4=dict( num_modules=2, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(18, 36, 72, 144), fuse_method='SUM', ), ), hrnet_w18=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(18, 36), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(18, 36, 72), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(18, 36, 72, 144), fuse_method='SUM', ), ), hrnet_w30=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(30, 60), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(30, 60, 120), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(30, 60, 120, 240), fuse_method='SUM', ), ), hrnet_w32=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(32, 64), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', 
num_blocks=(4, 4, 4), num_channels=(32, 64, 128), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256), fuse_method='SUM', ), ), hrnet_w40=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(40, 80), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(40, 80, 160), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(40, 80, 160, 320), fuse_method='SUM', ), ), hrnet_w44=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(44, 88), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(44, 88, 176), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(44, 88, 176, 352), fuse_method='SUM', ), ), hrnet_w48=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(48, 96), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(48, 96, 192), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(48, 96, 192, 384), fuse_method='SUM', ), ), hrnet_w64=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(64, 128), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(64, 128, 256), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(64, 128, 256, 512), fuse_method='SUM', ), ) ) class HighResolutionModule(nn.Module): def __init__( self, num_branches, block_types, num_blocks, num_in_chs, num_channels, fuse_method, multi_scale_output=True, ): super(HighResolutionModule, self).__init__() self._check_branches( num_branches, block_types, num_blocks, num_in_chs, num_channels, ) self.num_in_chs = num_in_chs self.fuse_method = fuse_method self.num_branches = num_branches self.multi_scale_output = multi_scale_output self.branches = self._make_branches( num_branches, block_types, num_blocks, num_channels, ) self.fuse_layers = self._make_fuse_layers() self.fuse_act = nn.ReLU(False) def _check_branches(self, num_branches, block_types, num_blocks, num_in_chs, num_channels): error_msg = '' if num_branches != len(num_blocks): error_msg = 'num_branches({}) <> num_blocks({})'.format(num_branches, len(num_blocks)) elif num_branches != len(num_channels): error_msg = 'num_branches({}) <> num_channels({})'.format(num_branches, len(num_channels)) elif num_branches != len(num_in_chs): error_msg = 'num_branches({}) <> 
num_in_chs({})'.format(num_branches, len(num_in_chs)) if error_msg: _logger.error(error_msg) raise ValueError(error_msg) def _make_one_branch(self, branch_index, block_type, num_blocks, num_channels, stride=1): downsample = None if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block_type.expansion: downsample = nn.Sequential( nn.Conv2d( self.num_in_chs[branch_index], num_channels[branch_index] * block_type.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(num_channels[branch_index] * block_type.expansion, momentum=_BN_MOMENTUM), ) layers = [block_type(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)] self.num_in_chs[branch_index] = num_channels[branch_index] * block_type.expansion for i in range(1, num_blocks[branch_index]): layers.append(block_type(self.num_in_chs[branch_index], num_channels[branch_index])) return nn.Sequential(*layers) def _make_branches(self, num_branches, block_type, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append(self._make_one_branch(i, block_type, num_blocks, num_channels)) return nn.ModuleList(branches) def _make_fuse_layers(self): if self.num_branches == 1: return nn.Identity() num_branches = self.num_branches num_in_chs = self.num_in_chs fuse_layers = [] for i in range(num_branches if self.multi_scale_output else 1): fuse_layer = [] for j in range(num_branches): if j > i: fuse_layer.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False), nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM), nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) elif j == i: fuse_layer.append(nn.Identity()) else: conv3x3s = [] for k in range(i - j): if k == i - j - 1: num_out_chs_conv3x3 = num_in_chs[i] conv3x3s.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM) )) else: num_out_chs_conv3x3 = num_in_chs[j] conv3x3s.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM), nn.ReLU(False) )) fuse_layer.append(nn.Sequential(*conv3x3s)) fuse_layers.append(nn.ModuleList(fuse_layer)) return nn.ModuleList(fuse_layers) def get_num_in_chs(self): return self.num_in_chs def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: if self.num_branches == 1: return [self.branches[0](x[0])] for i, branch in enumerate(self.branches): x[i] = branch(x[i]) x_fuse = [] for i, fuse_outer in enumerate(self.fuse_layers): y = None for j, f in enumerate(fuse_outer): if y is None: y = f(x[j]) else: y = y + f(x[j]) x_fuse.append(self.fuse_act(y)) return x_fuse class SequentialList(nn.Sequential): def __init__(self, *args): super(SequentialList, self).__init__(*args) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (List[torch.Tensor]) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (List[torch.Tensor]) pass def forward(self, x) -> List[torch.Tensor]: for module in self: x = module(x) return x @torch.jit.interface class ModuleInterface(torch.nn.Module): def forward(self, input: torch.Tensor) -> torch.Tensor: # `input` has the same name as in Sequential forward pass block_types_dict = { 'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck } class HighResolutionNet(nn.Module): def __init__( self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0,
head='classification', **kwargs, ): super(HighResolutionNet, self).__init__() self.num_classes = num_classes assert output_stride == 32 # FIXME support dilation cfg.update(**kwargs) stem_width = cfg['stem_width'] self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) self.act2 = nn.ReLU(inplace=True) self.stage1_cfg = cfg['stage1'] num_channels = self.stage1_cfg['num_channels'][0] block_type = block_types_dict[self.stage1_cfg['block_type']] num_blocks = self.stage1_cfg['num_blocks'][0] self.layer1 = self._make_layer(block_type, 64, num_channels, num_blocks) stage1_out_channel = block_type.expansion * num_channels self.stage2_cfg = cfg['stage2'] num_channels = self.stage2_cfg['num_channels'] block_type = block_types_dict[self.stage2_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) self.stage3_cfg = cfg['stage3'] num_channels = self.stage3_cfg['num_channels'] block_type = block_types_dict[self.stage3_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) self.stage4_cfg = cfg['stage4'] num_channels = self.stage4_cfg['num_channels'] block_type = block_types_dict[self.stage4_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) self.head = head self.head_channels = None # set if _make_head called head_conv_bias = cfg.pop('head_conv_bias', True) if head == 'classification': # Classification Head self.num_features = 2048 self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head( pre_stage_channels, conv_bias=head_conv_bias, ) self.global_pool, self.head_drop, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate, ) else: if head == 'incre': self.num_features = 2048 self.incre_modules, _, _ = self._make_head(pre_stage_channels, incre_only=True) else: self.num_features = 256 self.incre_modules = None self.global_pool = nn.Identity() self.head_drop = nn.Identity() self.classifier = nn.Identity() curr_stride = 2 # module names aren't actually valid here, hook or FeatureNet based extraction would not work self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] for i, c in enumerate(self.head_channels if self.head_channels else num_channels): curr_stride *= 2 c = c * 4 if self.head_channels else c # head block_type expansion factor of 4 self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] self.init_weights() def _make_head(self, pre_stage_channels, incre_only=False, conv_bias=True): head_block_type = Bottleneck self.head_channels = [32, 64, 128, 256] # Increasing the #channels on each resolution # from C, 2C, 4C, 8C to 128, 
256, 512, 1024 incre_modules = [] for i, channels in enumerate(pre_stage_channels): incre_modules.append(self._make_layer(head_block_type, channels, self.head_channels[i], 1, stride=1)) incre_modules = nn.ModuleList(incre_modules) if incre_only: return incre_modules, None, None # downsampling modules downsamp_modules = [] for i in range(len(pre_stage_channels) - 1): in_channels = self.head_channels[i] * head_block_type.expansion out_channels = self.head_channels[i + 1] * head_block_type.expansion downsamp_module = nn.Sequential( nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, bias=conv_bias), nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True) ) downsamp_modules.append(downsamp_module) downsamp_modules = nn.ModuleList(downsamp_modules) final_layer = nn.Sequential( nn.Conv2d( in_channels=self.head_channels[3] * head_block_type.expansion, out_channels=self.num_features, kernel_size=1, stride=1, padding=0, bias=conv_bias), nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True) ) return incre_modules, downsamp_modules, final_layer def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): num_branches_cur = len(num_channels_cur_layer) num_branches_pre = len(num_channels_pre_layer) transition_layers = [] for i in range(num_branches_cur): if i < num_branches_pre: if num_channels_cur_layer[i] != num_channels_pre_layer[i]: transition_layers.append(nn.Sequential( nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))) else: transition_layers.append(nn.Identity()) else: conv3x3s = [] for j in range(i + 1 - num_branches_pre): _in_chs = num_channels_pre_layer[-1] _out_chs = num_channels_cur_layer[i] if j == i - num_branches_pre else _in_chs conv3x3s.append(nn.Sequential( nn.Conv2d(_in_chs, _out_chs, 3, 2, 1, bias=False), nn.BatchNorm2d(_out_chs, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))) transition_layers.append(nn.Sequential(*conv3x3s)) return nn.ModuleList(transition_layers) def _make_layer(self, block_type, inplanes, planes, block_types, stride=1): downsample = None if stride != 1 or inplanes != planes * block_type.expansion: downsample = nn.Sequential( nn.Conv2d(inplanes, planes * block_type.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block_type.expansion, momentum=_BN_MOMENTUM), ) layers = [block_type(inplanes, planes, stride, downsample)] inplanes = planes * block_type.expansion for i in range(1, block_types): layers.append(block_type(inplanes, planes)) return nn.Sequential(*layers) def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True): num_modules = layer_config['num_modules'] num_branches = layer_config['num_branches'] num_blocks = layer_config['num_blocks'] num_channels = layer_config['num_channels'] block_type = block_types_dict[layer_config['block_type']] fuse_method = layer_config['fuse_method'] modules = [] for i in range(num_modules): # multi_scale_output is only used last module reset_multi_scale_output = multi_scale_output or i < num_modules - 1 modules.append(HighResolutionModule( num_branches, block_type, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output) ) num_in_chs = modules[-1].get_num_in_chs() return SequentialList(*modules), num_in_chs @torch.jit.ignore def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( 
m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv[12]|bn[12]', block_types=r'^(?:layer|stage|transition)(\d+)' if coarse else [ (r'^layer(\d+)\.(\d+)', None), (r'^stage(\d+)\.(\d+)', None), (r'^transition(\d+)', (99999,)), ], ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "gradient checkpointing not supported" @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def stages(self, x) -> List[torch.Tensor]: x = self.layer1(x) xl = [t(x) for i, t in enumerate(self.transition1)] yl = self.stage2(xl) xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] yl = self.stage3(xl) xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] yl = self.stage4(xl) return yl def forward_features(self, x): # Stem x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) # Stages yl = self.stages(x) if self.incre_modules is None or self.downsamp_modules is None: return yl y = None for i, incre in enumerate(self.incre_modules): if y is None: y = incre(yl[i]) else: down: ModuleInterface = self.downsamp_modules[i - 1] # needed for torchscript module indexing y = incre(yl[i]) + down.forward(y) y = self.final_layer(y) return y def forward_head(self, x, pre_logits: bool = False): # Classification Head x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classifier(x) def forward(self, x): y = self.forward_features(x) x = self.forward_head(y) return x class HighResolutionNetFeatures(HighResolutionNet): """HighResolutionNet feature extraction The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so. It would be more complicated to use the FeatureNet helpers. The `feature_location=incre` allows grabbing increased channel count features using part of the classification head. If `feature_location=''` the default HRNet features are returned. First stem conv is used for stride 2 features. 
""" def __init__( self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, feature_location='incre', out_indices=(0, 1, 2, 3, 4), **kwargs, ): assert feature_location in ('incre', '') super(HighResolutionNetFeatures, self).__init__( cfg, in_chans=in_chans, num_classes=num_classes, output_stride=output_stride, global_pool=global_pool, drop_rate=drop_rate, head=feature_location, **kwargs, ) self.feature_info = FeatureInfo(self.feature_info, out_indices) self._out_idx = {f['index'] for f in self.feature_info.get_dicts()} def forward_features(self, x): assert False, 'Not supported' def forward(self, x) -> List[torch.tensor]: out = [] x = self.conv1(x) x = self.bn1(x) x = self.act1(x) if 0 in self._out_idx: out.append(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) x = self.stages(x) if self.incre_modules is not None: x = [incre(f) for f, incre in zip(x, self.incre_modules)] for i, f in enumerate(x): if i + 1 in self._out_idx: out.append(f) return out def _create_hrnet(variant, pretrained=False, cfg_variant=None, **model_kwargs): model_cls = HighResolutionNet features_only = False kwargs_filter = None if model_kwargs.pop('features_only', False): model_cls = HighResolutionNetFeatures kwargs_filter = ('num_classes', 'global_pool') features_only = True cfg_variant = cfg_variant or variant model = build_model_with_cfg( model_cls, variant, pretrained, model_cfg=cfg_cls[cfg_variant], pretrained_strict=not features_only, kwargs_filter=kwargs_filter, **model_kwargs, ) if features_only: model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg) model.default_cfg = model.pretrained_cfg # backwards compat return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'hrnet_w18_small.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), 'hrnet_w18_small.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18_small_v2.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'), 'hrnet_w18_small_v2.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18.ms_aug_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, ), 'hrnet_w18.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w30.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w32.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w40.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w44.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w48.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w64.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18_ssld.paddle_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) ), 'hrnet_w48_ssld.paddle_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) ), }) @register_model def hrnet_w18_small(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18_small', pretrained, **kwargs) @register_model def hrnet_w18_small_v2(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs) @register_model def hrnet_w18(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18', pretrained, **kwargs) @register_model def hrnet_w30(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w30', pretrained, **kwargs) @register_model def hrnet_w32(pretrained=False, **kwargs) -> 
HighResolutionNet: return _create_hrnet('hrnet_w32', pretrained, **kwargs) @register_model def hrnet_w40(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w40', pretrained, **kwargs) @register_model def hrnet_w44(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w44', pretrained, **kwargs) @register_model def hrnet_w48(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w48', pretrained, **kwargs) @register_model def hrnet_w64(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w64', pretrained, **kwargs) @register_model def hrnet_w18_ssld(pretrained=False, **kwargs) -> HighResolutionNet: kwargs.setdefault('head_conv_bias', False) return _create_hrnet('hrnet_w18_ssld', cfg_variant='hrnet_w18', pretrained=pretrained, **kwargs) @register_model def hrnet_w48_ssld(pretrained=False, **kwargs) -> HighResolutionNet: kwargs.setdefault('head_conv_bias', False) return _create_hrnet('hrnet_w48_ssld', cfg_variant='hrnet_w48', pretrained=pretrained, **kwargs)
0
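Illustrative usage sketch for the HRNet definitions above (not part of hrnet.py); it assumes a working timm installation and only exercises entry points registered in this file ('hrnet_w18', the classification head, and the features_only / HighResolutionNetFeatures path).

# Minimal sketch, assuming `timm` is installed; not part of the source file above.
import torch
import timm

# Standard classification path: stem -> 4 HRNet stages -> incre/downsample head -> classifier.
model = timm.create_model('hrnet_w18', pretrained=False)
logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000])

# features_only routes through HighResolutionNetFeatures (feature_location='incre' by default)
# and returns one feature map per feature_info entry, at strides 2, 4, 8, 16, 32.
feat_model = timm.create_model('hrnet_w18', pretrained=False, features_only=True)
feats = feat_model(torch.randn(1, 3, 224, 224))
print([f.shape for f in feats])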
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/hub.py
from ._hub import *

import warnings

warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/inception_resnet_v2.py
""" Pytorch Inception-Resnet-V2 implementation Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) """ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_classifier, ConvNormAct from ._builder import build_model_with_cfg from ._manipulate import flatten_modules from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['InceptionResnetV2'] class Mixed_5b(nn.Module): def __init__(self, conv_block=None): super(Mixed_5b, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(192, 96, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(192, 48, kernel_size=1, stride=1), conv_block(48, 64, kernel_size=5, stride=1, padding=2) ) self.branch2 = nn.Sequential( conv_block(192, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1, padding=1), conv_block(96, 96, kernel_size=3, stride=1, padding=1) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(192, 64, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block35(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block35, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 32, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 32, kernel_size=3, stride=1, padding=1) ) self.branch2 = nn.Sequential( conv_block(320, 32, kernel_size=1, stride=1), conv_block(32, 48, kernel_size=3, stride=1, padding=1), conv_block(48, 64, kernel_size=3, stride=1, padding=1) ) self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class Mixed_6a(nn.Module): def __init__(self, conv_block=None): super(Mixed_6a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = conv_block(320, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential( conv_block(320, 256, kernel_size=1, stride=1), conv_block(256, 256, kernel_size=3, stride=1, padding=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class Block17(nn.Module): def __init__(self, scale=1.0, conv_block=None): super(Block17, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(1088, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(1088, 128, kernel_size=1, stride=1), conv_block(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)) ) self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1) self.act = nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x out = self.act(out) return out class 
Mixed_7a(nn.Module): def __init__(self, conv_block=None): super(Mixed_7a, self).__init__() conv_block = conv_block or ConvNormAct self.branch0 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 384, kernel_size=3, stride=2) ) self.branch1 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=2) ) self.branch2 = nn.Sequential( conv_block(1088, 256, kernel_size=1, stride=1), conv_block(256, 288, kernel_size=3, stride=1, padding=1), conv_block(288, 320, kernel_size=3, stride=2) ) self.branch3 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class Block8(nn.Module): def __init__(self, scale=1.0, no_relu=False, conv_block=None): super(Block8, self).__init__() self.scale = scale conv_block = conv_block or ConvNormAct self.branch0 = conv_block(2080, 192, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(2080, 192, kernel_size=1, stride=1), conv_block(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)), conv_block(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) ) self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1) self.relu = None if no_relu else nn.ReLU() def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) out = self.conv2d(out) out = out * self.scale + x if self.relu is not None: out = self.relu(out) return out class InceptionResnetV2(nn.Module): def __init__( self, num_classes=1000, in_chans=3, drop_rate=0., output_stride=32, global_pool='avg', norm_layer='batchnorm2d', norm_eps=1e-3, act_layer='relu', ): super(InceptionResnetV2, self).__init__() self.num_classes = num_classes self.num_features = 1536 assert output_stride == 32 conv_block = partial( ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True), ) self.conv2d_1a = conv_block(in_chans, 32, kernel_size=3, stride=2) self.conv2d_2a = conv_block(32, 32, kernel_size=3, stride=1) self.conv2d_2b = conv_block(32, 64, kernel_size=3, stride=1, padding=1) self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')] self.maxpool_3a = nn.MaxPool2d(3, stride=2) self.conv2d_3b = conv_block(64, 80, kernel_size=1, stride=1) self.conv2d_4a = conv_block(80, 192, kernel_size=3, stride=1) self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')] self.maxpool_5a = nn.MaxPool2d(3, stride=2) self.mixed_5b = Mixed_5b(conv_block=conv_block) self.repeat = nn.Sequential(*[Block35(scale=0.17, conv_block=conv_block) for _ in range(10)]) self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')] self.mixed_6a = Mixed_6a(conv_block=conv_block) self.repeat_1 = nn.Sequential(*[Block17(scale=0.10, conv_block=conv_block) for _ in range(20)]) self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')] self.mixed_7a = Mixed_7a(conv_block=conv_block) self.repeat_2 = nn.Sequential(*[Block8(scale=0.20, conv_block=conv_block) for _ in range(9)]) self.block8 = Block8(no_relu=True, conv_block=conv_block) self.conv2d_7b = conv_block(2080, self.num_features, kernel_size=1, stride=1) self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')] self.global_pool, self.head_drop, self.classif = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, 
coarse=False): module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} module_map.pop(('classif',)) def _matcher(name): if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]): return 0 elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]): return 1 elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]): return len(module_map) + 1 else: for k in module_map.keys(): if k == tuple(name.split('.')[:len(k)]): return module_map[k] return float('inf') return _matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "checkpointing not supported" @torch.jit.ignore def get_classifier(self): return self.classif def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv2d_1a(x) x = self.conv2d_2a(x) x = self.conv2d_2b(x) x = self.maxpool_3a(x) x = self.conv2d_3b(x) x = self.conv2d_4a(x) x = self.maxpool_5a(x) x = self.mixed_5b(x) x = self.repeat(x) x = self.mixed_6a(x) x = self.repeat_1(x) x = self.mixed_7a(x) x = self.repeat_2(x) x = self.block8(x) x = self.conv2d_7b(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classif(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_resnet_v2(variant, pretrained=False, **kwargs): return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs) default_cfgs = generate_default_cfgs({ # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz 'inception_resnet_v2.tf_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', }, # As per https://arxiv.org/abs/1705.07204 and # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz 'inception_resnet_v2.tf_ens_adv_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif', } }) @register_model def inception_resnet_v2(pretrained=False, **kwargs) -> InceptionResnetV2: return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, { 'ens_adv_inception_resnet_v2': 'inception_resnet_v2.tf_ens_adv_in1k', })
0
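Illustrative sketch of the forward_features / forward_head split defined above (not part of inception_resnet_v2.py); it assumes a working timm installation and the 299x299 input size from the default config.

# Minimal sketch, assuming `timm` is installed; not part of the source file above.
import torch
import timm

model = timm.create_model('inception_resnet_v2', pretrained=False)
x = torch.randn(1, 3, 299, 299)                         # default_cfg input size for this model

feats = model.forward_features(x)                       # (1, 1536, 8, 8) map from conv2d_7b
pooled = model.forward_head(feats, pre_logits=True)     # (1, 1536) pooled features, classifier skipped
logits = model.forward_head(feats)                      # (1, 1000) via the 'classif' head
print(feats.shape, pooled.shape, logits.shape)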
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/inception_v3.py
""" Inception-V3 Originally from torchvision Inception3 model Licensed BSD-Clause 3 https://github.com/pytorch/vision/blob/master/LICENSE """ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import trunc_normal_, create_classifier, Linear, ConvNormAct from ._builder import build_model_with_cfg from ._builder import resolve_pretrained_cfg from ._manipulate import flatten_modules from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['InceptionV3'] # model_registry will add each entrypoint fn to this class InceptionA(nn.Module): def __init__(self, in_channels, pool_features, conv_block=None): super(InceptionA, self).__init__() conv_block = conv_block or ConvNormAct self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch5x5 = self.branch5x5_1(x) branch5x5 = self.branch5x5_2(branch5x5) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionB(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionB, self).__init__() conv_block = conv_block or ConvNormAct self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) def _forward(self, x): branch3x3 = self.branch3x3(x) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) outputs = [branch3x3, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionC(nn.Module): def __init__(self, in_channels, channels_7x7, conv_block=None): super(InceptionC, self).__init__() conv_block = conv_block or ConvNormAct self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) c7 = channels_7x7 self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1) self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) self.branch_pool = 
conv_block(in_channels, 192, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch7x7 = self.branch7x7_1(x) branch7x7 = self.branch7x7_2(branch7x7) branch7x7 = self.branch7x7_3(branch7x7) branch7x7dbl = self.branch7x7dbl_1(x) branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionD(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionD, self).__init__() conv_block = conv_block or ConvNormAct self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) def _forward(self, x): branch3x3 = self.branch3x3_1(x) branch3x3 = self.branch3x3_2(branch3x3) branch7x7x3 = self.branch7x7x3_1(x) branch7x7x3 = self.branch7x7x3_2(branch7x7x3) branch7x7x3 = self.branch7x7x3_3(branch7x7x3) branch7x7x3 = self.branch7x7x3_4(branch7x7x3) branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) outputs = [branch3x3, branch7x7x3, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionE(nn.Module): def __init__(self, in_channels, conv_block=None): super(InceptionE, self).__init__() conv_block = conv_block or ConvNormAct self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) self.branch_pool = conv_block(in_channels, 192, kernel_size=1) def _forward(self, x): branch1x1 = self.branch1x1(x) branch3x3 = self.branch3x3_1(x) branch3x3 = [ self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3), ] branch3x3 = torch.cat(branch3x3, 1) branch3x3dbl = self.branch3x3dbl_1(x) branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) branch3x3dbl = [ self.branch3x3dbl_3a(branch3x3dbl), self.branch3x3dbl_3b(branch3x3dbl), ] branch3x3dbl = torch.cat(branch3x3dbl, 1) branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) branch_pool = self.branch_pool(branch_pool) outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] return outputs def forward(self, x): outputs = self._forward(x) return torch.cat(outputs, 1) class InceptionAux(nn.Module): def __init__(self, in_channels, num_classes, conv_block=None): super(InceptionAux, self).__init__() conv_block = conv_block or ConvNormAct self.conv0 = conv_block(in_channels, 128, kernel_size=1) self.conv1 = conv_block(128, 768, kernel_size=5) self.conv1.stddev = 0.01 self.fc = Linear(768, num_classes) 
self.fc.stddev = 0.001 def forward(self, x): # N x 768 x 17 x 17 x = F.avg_pool2d(x, kernel_size=5, stride=3) # N x 768 x 5 x 5 x = self.conv0(x) # N x 128 x 5 x 5 x = self.conv1(x) # N x 768 x 1 x 1 # Adaptive average pooling x = F.adaptive_avg_pool2d(x, (1, 1)) # N x 768 x 1 x 1 x = torch.flatten(x, 1) # N x 768 x = self.fc(x) # N x 1000 return x class InceptionV3(nn.Module): """Inception-V3 """ aux_logits: torch.jit.Final[bool] def __init__( self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg', aux_logits=False, norm_layer='batchnorm2d', norm_eps=1e-3, act_layer='relu', ): super(InceptionV3, self).__init__() self.num_classes = num_classes self.aux_logits = aux_logits conv_block = partial( ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True), ) self.Conv2d_1a_3x3 = conv_block(in_chans, 32, kernel_size=3, stride=2) self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3) self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1) self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2) self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1) self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3) self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2) self.Mixed_5b = InceptionA(192, pool_features=32, conv_block=conv_block) self.Mixed_5c = InceptionA(256, pool_features=64, conv_block=conv_block) self.Mixed_5d = InceptionA(288, pool_features=64, conv_block=conv_block) self.Mixed_6a = InceptionB(288, conv_block=conv_block) self.Mixed_6b = InceptionC(768, channels_7x7=128, conv_block=conv_block) self.Mixed_6c = InceptionC(768, channels_7x7=160, conv_block=conv_block) self.Mixed_6d = InceptionC(768, channels_7x7=160, conv_block=conv_block) self.Mixed_6e = InceptionC(768, channels_7x7=192, conv_block=conv_block) if aux_logits: self.AuxLogits = InceptionAux(768, num_classes, conv_block=conv_block) else: self.AuxLogits = None self.Mixed_7a = InceptionD(768, conv_block=conv_block) self.Mixed_7b = InceptionE(1280, conv_block=conv_block) self.Mixed_7c = InceptionE(2048, conv_block=conv_block) self.feature_info = [ dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'), dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'), dict(num_chs=288, reduction=8, module='Mixed_5d'), dict(num_chs=768, reduction=16, module='Mixed_6e'), dict(num_chs=2048, reduction=32, module='Mixed_7c'), ] self.num_features = 2048 self.global_pool, self.head_drop, self.fc = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate, ) for m in self.modules(): if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): stddev = m.stddev if hasattr(m, 'stddev') else 0.1 trunc_normal_(m.weight, std=stddev) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))} module_map.pop(('fc',)) def _matcher(name): if any([name.startswith(n) for n in ('Conv2d_1', 'Conv2d_2')]): return 0 elif any([name.startswith(n) for n in ('Conv2d_3', 'Conv2d_4')]): return 1 else: for k in module_map.keys(): if k == tuple(name.split('.')[:len(k)]): return module_map[k] return float('inf') return _matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): 
self.num_classes = num_classes self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_preaux(self, x): x = self.Conv2d_1a_3x3(x) # N x 32 x 149 x 149 x = self.Conv2d_2a_3x3(x) # N x 32 x 147 x 147 x = self.Conv2d_2b_3x3(x) # N x 64 x 147 x 147 x = self.Pool1(x) # N x 64 x 73 x 73 x = self.Conv2d_3b_1x1(x) # N x 80 x 73 x 73 x = self.Conv2d_4a_3x3(x) # N x 192 x 71 x 71 x = self.Pool2(x) # N x 192 x 35 x 35 x = self.Mixed_5b(x) # N x 256 x 35 x 35 x = self.Mixed_5c(x) # N x 288 x 35 x 35 x = self.Mixed_5d(x) # N x 288 x 35 x 35 x = self.Mixed_6a(x) # N x 768 x 17 x 17 x = self.Mixed_6b(x) # N x 768 x 17 x 17 x = self.Mixed_6c(x) # N x 768 x 17 x 17 x = self.Mixed_6d(x) # N x 768 x 17 x 17 x = self.Mixed_6e(x) # N x 768 x 17 x 17 return x def forward_postaux(self, x): x = self.Mixed_7a(x) # N x 1280 x 8 x 8 x = self.Mixed_7b(x) # N x 2048 x 8 x 8 x = self.Mixed_7c(x) # N x 2048 x 8 x 8 return x def forward_features(self, x): x = self.forward_preaux(x) if self.aux_logits: aux = self.AuxLogits(x) x = self.forward_postaux(x) return x, aux x = self.forward_postaux(x) return x def forward_head(self, x): x = self.global_pool(x) x = self.head_drop(x) x = self.fc(x) return x def forward(self, x): if self.aux_logits: x, aux = self.forward_features(x) x = self.forward_head(x) return x, aux x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_v3(variant, pretrained=False, **kwargs): pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None)) aux_logits = kwargs.get('aux_logits', False) has_aux_logits = False if pretrained_cfg: # only torchvision pretrained weights have aux logits has_aux_logits = pretrained_cfg.tag == 'tv_in1k' if aux_logits: assert not kwargs.pop('features_only', False) load_strict = has_aux_logits else: load_strict = not has_aux_logits return build_model_with_cfg( InceptionV3, variant, pretrained, pretrained_cfg=pretrained_cfg, pretrained_strict=load_strict, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ # original PyTorch weights, ported from Tensorflow but modified 'inception_v3.tv_in1k': _cfg( # NOTE checkpoint has aux logit layer weights hf_hub_id='timm/', url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'), # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz) 'inception_v3.tf_in1k': _cfg(hf_hub_id='timm/'), # my port of Tensorflow adversarially trained Inception V3 from # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz 'inception_v3.tf_adv_in1k': _cfg(hf_hub_id='timm/'), # from gluon pretrained models, best performing in terms of accuracy/loss metrics # https://gluon-cv.mxnet.io/model_zoo/classification.html 'inception_v3.gluon_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults std=IMAGENET_DEFAULT_STD, # also works well with inception defaults ) }) @register_model def inception_v3(pretrained=False, **kwargs) -> InceptionV3: model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs) return model register_model_deprecations(__name__, { 'tf_inception_v3': 'inception_v3.tf_in1k', 
'adv_inception_v3': 'inception_v3.tf_adv_in1k', 'gluon_inception_v3': 'inception_v3.gluon_in1k', })
0
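Illustrative sketch of the aux_logits behaviour implemented above (not part of inception_v3.py); it assumes a working timm installation. With the default aux_logits=False the forward pass returns logits only; with aux_logits=True an InceptionAux head taps the Mixed_6e output and forward() returns a (logits, aux_logits) tuple.

# Minimal sketch, assuming `timm` is installed; not part of the source file above.
import torch
import timm

# Default: no auxiliary classifier, forward() returns logits only.
model = timm.create_model('inception_v3', pretrained=False)
logits = model(torch.randn(2, 3, 299, 299))
print(logits.shape)                        # torch.Size([2, 1000])

# aux_logits=True: InceptionAux taps the N x 768 x 17 x 17 Mixed_6e output,
# and forward() returns the (logits, aux_logits) tuple.
model_aux = timm.create_model('inception_v3', pretrained=False, aux_logits=True)
logits, aux = model_aux(torch.randn(2, 3, 299, 299))
print(logits.shape, aux.shape)             # torch.Size([2, 1000]) torch.Size([2, 1000])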
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/inception_v4.py
""" Pytorch Inception-V4 implementation Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License) """ from functools import partial import torch import torch.nn as nn from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import create_classifier, ConvNormAct from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['InceptionV4'] class Mixed3a(nn.Module): def __init__(self, conv_block=ConvNormAct): super(Mixed3a, self).__init__() self.maxpool = nn.MaxPool2d(3, stride=2) self.conv = conv_block(64, 96, kernel_size=3, stride=2) def forward(self, x): x0 = self.maxpool(x) x1 = self.conv(x) out = torch.cat((x0, x1), 1) return out class Mixed4a(nn.Module): def __init__(self, conv_block=ConvNormAct): super(Mixed4a, self).__init__() self.branch0 = nn.Sequential( conv_block(160, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1) ) self.branch1 = nn.Sequential( conv_block(160, 64, kernel_size=1, stride=1), conv_block(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)), conv_block(64, 96, kernel_size=(3, 3), stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) out = torch.cat((x0, x1), 1) return out class Mixed5a(nn.Module): def __init__(self, conv_block=ConvNormAct): super(Mixed5a, self).__init__() self.conv = conv_block(192, 192, kernel_size=3, stride=2) self.maxpool = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.conv(x) x1 = self.maxpool(x) out = torch.cat((x0, x1), 1) return out class InceptionA(nn.Module): def __init__(self, conv_block=ConvNormAct): super(InceptionA, self).__init__() self.branch0 = conv_block(384, 96, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(384, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1, padding=1) ) self.branch2 = nn.Sequential( conv_block(384, 64, kernel_size=1, stride=1), conv_block(64, 96, kernel_size=3, stride=1, padding=1), conv_block(96, 96, kernel_size=3, stride=1, padding=1) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(384, 96, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class ReductionA(nn.Module): def __init__(self, conv_block=ConvNormAct): super(ReductionA, self).__init__() self.branch0 = conv_block(384, 384, kernel_size=3, stride=2) self.branch1 = nn.Sequential( conv_block(384, 192, kernel_size=1, stride=1), conv_block(192, 224, kernel_size=3, stride=1, padding=1), conv_block(224, 256, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class InceptionB(nn.Module): def __init__(self, conv_block=ConvNormAct): super(InceptionB, self).__init__() self.branch0 = conv_block(1024, 384, kernel_size=1, stride=1) self.branch1 = nn.Sequential( conv_block(1024, 192, kernel_size=1, stride=1), conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0)) ) self.branch2 = nn.Sequential( conv_block(1024, 192, kernel_size=1, stride=1), conv_block(192, 192, kernel_size=(7, 1), stride=1, 
padding=(3, 0)), conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)), conv_block(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)) ) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(1024, 128, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class ReductionB(nn.Module): def __init__(self, conv_block=ConvNormAct): super(ReductionB, self).__init__() self.branch0 = nn.Sequential( conv_block(1024, 192, kernel_size=1, stride=1), conv_block(192, 192, kernel_size=3, stride=2) ) self.branch1 = nn.Sequential( conv_block(1024, 256, kernel_size=1, stride=1), conv_block(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)), conv_block(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)), conv_block(320, 320, kernel_size=3, stride=2) ) self.branch2 = nn.MaxPool2d(3, stride=2) def forward(self, x): x0 = self.branch0(x) x1 = self.branch1(x) x2 = self.branch2(x) out = torch.cat((x0, x1, x2), 1) return out class InceptionC(nn.Module): def __init__(self, conv_block=ConvNormAct): super(InceptionC, self).__init__() self.branch0 = conv_block(1536, 256, kernel_size=1, stride=1) self.branch1_0 = conv_block(1536, 384, kernel_size=1, stride=1) self.branch1_1a = conv_block(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) self.branch1_1b = conv_block(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) self.branch2_0 = conv_block(1536, 384, kernel_size=1, stride=1) self.branch2_1 = conv_block(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0)) self.branch2_2 = conv_block(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1)) self.branch2_3a = conv_block(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1)) self.branch2_3b = conv_block(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0)) self.branch3 = nn.Sequential( nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), conv_block(1536, 256, kernel_size=1, stride=1) ) def forward(self, x): x0 = self.branch0(x) x1_0 = self.branch1_0(x) x1_1a = self.branch1_1a(x1_0) x1_1b = self.branch1_1b(x1_0) x1 = torch.cat((x1_1a, x1_1b), 1) x2_0 = self.branch2_0(x) x2_1 = self.branch2_1(x2_0) x2_2 = self.branch2_2(x2_1) x2_3a = self.branch2_3a(x2_2) x2_3b = self.branch2_3b(x2_2) x2 = torch.cat((x2_3a, x2_3b), 1) x3 = self.branch3(x) out = torch.cat((x0, x1, x2, x3), 1) return out class InceptionV4(nn.Module): def __init__( self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg', norm_layer='batchnorm2d', norm_eps=1e-3, act_layer='relu', ): super(InceptionV4, self).__init__() assert output_stride == 32 self.num_classes = num_classes self.num_features = 1536 conv_block = partial( ConvNormAct, padding=0, norm_layer=norm_layer, act_layer=act_layer, norm_kwargs=dict(eps=norm_eps), act_kwargs=dict(inplace=True), ) features = [ conv_block(in_chans, 32, kernel_size=3, stride=2), conv_block(32, 32, kernel_size=3, stride=1), conv_block(32, 64, kernel_size=3, stride=1, padding=1), Mixed3a(conv_block), Mixed4a(conv_block), Mixed5a(conv_block), ] features += [InceptionA(conv_block) for _ in range(4)] features += [ReductionA(conv_block)] # Mixed6a features += [InceptionB(conv_block) for _ in range(7)] features += [ReductionB(conv_block)] # Mixed7a features += [InceptionC(conv_block) for _ in range(3)] self.features = nn.Sequential(*features) self.feature_info = 
[ dict(num_chs=64, reduction=2, module='features.2'), dict(num_chs=160, reduction=4, module='features.3'), dict(num_chs=384, reduction=8, module='features.9'), dict(num_chs=1024, reduction=16, module='features.17'), dict(num_chs=1536, reduction=32, module='features.21'), ] self.global_pool, self.head_drop, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^features\.[012]\.', blocks=r'^features\.(\d+)' ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.last_linear def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): return self.features(x) def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_inception_v4(variant, pretrained=False, **kwargs) -> InceptionV4: return build_model_with_cfg( InceptionV4, variant, pretrained, feature_cfg=dict(flatten_sequential=True), **kwargs, ) default_cfgs = generate_default_cfgs({ 'inception_v4.tf_in1k': { 'hf_hub_id': 'timm/', 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'features.0.conv', 'classifier': 'last_linear', } }) @register_model def inception_v4(pretrained=False, **kwargs): return _create_inception_v4('inception_v4', pretrained, **kwargs)
0
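Illustrative sketch of feature extraction from the flattened `features` stack above (not part of inception_v4.py); it assumes a working timm installation. The feature_cfg uses flatten_sequential=True, so the five feature_info entries (features.2 / .3 / .9 / .17 / .21) become the taps for features_only extraction.

# Minimal sketch, assuming `timm` is installed; not part of the source file above.
import torch
import timm

feat_model = timm.create_model('inception_v4', pretrained=False, features_only=True)
print(feat_model.feature_info.channels())      # [64, 160, 384, 1024, 1536]
print(feat_model.feature_info.reduction())     # [2, 4, 8, 16, 32]

feats = feat_model(torch.randn(1, 3, 299, 299))
print([f.shape for f in feats])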
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/levit.py
""" LeViT Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference` - https://arxiv.org/abs/2104.01136 @article{graham2021levit, title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference}, author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze}, journal={arXiv preprint arXiv:22104.01136}, year={2021} } Adapted from official impl at https://github.com/facebookresearch/LeViT, original copyright bellow. This version combines both conv/linear models and fixes torchscript compatibility. Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # Modified from # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py # Copyright 2020 Ross Wightman, Apache-2.0 License from collections import OrderedDict from functools import partial from typing import Dict import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN from timm.layers import to_ntuple, to_2tuple, get_act_layer, DropPath, trunc_normal_ from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['Levit'] class ConvNorm(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.linear = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding, dilation, groups, bias=False) self.bn = nn.BatchNorm2d(out_chs) nn.init.constant_(self.bn.weight, bn_weight_init) @torch.no_grad() def fuse(self): c, bn = self.linear, self.bn w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = nn.Conv2d( w.size(1), w.size(0), w.shape[2:], stride=self.linear.stride, padding=self.linear.padding, dilation=self.linear.dilation, groups=self.linear.groups) m.weight.data.copy_(w) m.bias.data.copy_(b) return m def forward(self, x): return self.bn(self.linear(x)) class LinearNorm(nn.Module): def __init__(self, in_features, out_features, bn_weight_init=1): super().__init__() self.linear = nn.Linear(in_features, out_features, bias=False) self.bn = nn.BatchNorm1d(out_features) nn.init.constant_(self.bn.weight, bn_weight_init) @torch.no_grad() def fuse(self): l, bn = self.linear, self.bn w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = l.weight * w[:, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = nn.Linear(w.size(1), w.size(0)) m.weight.data.copy_(w) m.bias.data.copy_(b) return m def forward(self, x): x = self.linear(x) return self.bn(x.flatten(0, 1)).reshape_as(x) class NormLinear(nn.Module): def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.): super().__init__() self.bn = nn.BatchNorm1d(in_features) self.drop = nn.Dropout(drop) self.linear = nn.Linear(in_features, out_features, bias=bias) trunc_normal_(self.linear.weight, std=std) if self.linear.bias is not None: nn.init.constant_(self.linear.bias, 0) @torch.no_grad() def fuse(self): bn, l = self.bn, self.linear w = bn.weight / (bn.running_var + bn.eps) ** 0.5 b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 w = l.weight * w[None, :] if l.bias is None: b = b @ self.linear.weight.T else: b = (l.weight @ b[:, 
None]).view(-1) + self.linear.bias m = nn.Linear(w.size(1), w.size(0)) m.weight.data.copy_(w) m.bias.data.copy_(b) return m def forward(self, x): return self.linear(self.drop(self.bn(x))) class Stem8(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.stride = 8 self.add_module('conv1', ConvNorm(in_chs, out_chs // 4, 3, stride=2, padding=1)) self.add_module('act1', act_layer()) self.add_module('conv2', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1)) self.add_module('act2', act_layer()) self.add_module('conv3', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1)) class Stem16(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.stride = 16 self.add_module('conv1', ConvNorm(in_chs, out_chs // 8, 3, stride=2, padding=1)) self.add_module('act1', act_layer()) self.add_module('conv2', ConvNorm(out_chs // 8, out_chs // 4, 3, stride=2, padding=1)) self.add_module('act2', act_layer()) self.add_module('conv3', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1)) self.add_module('act3', act_layer()) self.add_module('conv4', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1)) class Downsample(nn.Module): def __init__(self, stride, resolution, use_pool=False): super().__init__() self.stride = stride self.resolution = to_2tuple(resolution) self.pool = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False) if use_pool else None def forward(self, x): B, N, C = x.shape x = x.view(B, self.resolution[0], self.resolution[1], C) if self.pool is not None: x = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) else: x = x[:, ::self.stride, ::self.stride] return x.reshape(B, -1, C) class Attention(nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, dim, key_dim, num_heads=8, attn_ratio=4., resolution=14, use_conv=False, act_layer=nn.SiLU, ): super().__init__() ln_layer = ConvNorm if use_conv else LinearNorm resolution = to_2tuple(resolution) self.use_conv = use_conv self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim self.key_attn_dim = key_dim * num_heads self.val_dim = int(attn_ratio * key_dim) self.val_attn_dim = int(attn_ratio * key_dim) * num_heads self.qkv = ln_layer(dim, self.val_attn_dim + self.key_attn_dim * 2) self.proj = nn.Sequential(OrderedDict([ ('act', act_layer()), ('ln', ln_layer(self.val_attn_dim, dim, bn_weight_init=0)) ])) self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) pos = torch.stack(torch.meshgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1] self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) self.attention_bias_cache = {} @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): # x (B,C,H,W) if self.use_conv: B, C, H, W = x.shape q, k, v = self.qkv(x).view( B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, 
self.val_dim], dim=2) attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) else: B, N, C = x.shape q, k, v = self.qkv(x).view( B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3) q = q.permute(0, 2, 1, 3) k = k.permute(0, 2, 3, 1) v = v.permute(0, 2, 1, 3) attn = q @ k * self.scale + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim) x = self.proj(x) return x class AttentionDownsample(nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=2.0, stride=2, resolution=14, use_conv=False, use_pool=False, act_layer=nn.SiLU, ): super().__init__() resolution = to_2tuple(resolution) self.stride = stride self.resolution = resolution self.num_heads = num_heads self.key_dim = key_dim self.key_attn_dim = key_dim * num_heads self.val_dim = int(attn_ratio * key_dim) self.val_attn_dim = self.val_dim * self.num_heads self.scale = key_dim ** -0.5 self.use_conv = use_conv if self.use_conv: ln_layer = ConvNorm sub_layer = partial( nn.AvgPool2d, kernel_size=3 if use_pool else 1, padding=1 if use_pool else 0, count_include_pad=False) else: ln_layer = LinearNorm sub_layer = partial(Downsample, resolution=resolution, use_pool=use_pool) self.kv = ln_layer(in_dim, self.val_attn_dim + self.key_attn_dim) self.q = nn.Sequential(OrderedDict([ ('down', sub_layer(stride=stride)), ('ln', ln_layer(in_dim, self.key_attn_dim)) ])) self.proj = nn.Sequential(OrderedDict([ ('act', act_layer()), ('ln', ln_layer(self.val_attn_dim, out_dim)) ])) self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1])) k_pos = torch.stack(torch.meshgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1) q_pos = torch.stack(torch.meshgrid( torch.arange(0, resolution[0], step=stride), torch.arange(0, resolution[1], step=stride))).flatten(1) rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs() rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1] self.register_buffer('attention_bias_idxs', rel_pos, persistent=False) self.attention_bias_cache = {} # per-device attention_biases cache @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): if self.use_conv: B, C, H, W = x.shape HH, WW = (H - 1) // self.stride + 1, (W - 1) // self.stride + 1 k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.val_dim], dim=2) q = self.q(x).view(B, self.num_heads, self.key_dim, -1) attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (v @ attn.transpose(-2, -1)).reshape(B, self.val_attn_dim, HH, WW) else: B, N, C = x.shape k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.val_dim], dim=3) k = k.permute(0, 2, 3, 1) # BHCN v = v.permute(0, 2, 1, 3) # BHNC q = self.q(x).view(B, -1, self.num_heads, self.key_dim).permute(0, 2, 
1, 3) attn = q @ k * self.scale + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, -1, self.val_attn_dim) x = self.proj(x) return x class LevitMlp(nn.Module): """ MLP for Levit w/ normalization + ability to switch btw conv and linear """ def __init__( self, in_features, hidden_features=None, out_features=None, use_conv=False, act_layer=nn.SiLU, drop=0. ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features ln_layer = ConvNorm if use_conv else LinearNorm self.ln1 = ln_layer(in_features, hidden_features) self.act = act_layer() self.drop = nn.Dropout(drop) self.ln2 = ln_layer(hidden_features, out_features, bn_weight_init=0) def forward(self, x): x = self.ln1(x) x = self.act(x) x = self.drop(x) x = self.ln2(x) return x class LevitDownsample(nn.Module): def __init__( self, in_dim, out_dim, key_dim, num_heads=8, attn_ratio=4., mlp_ratio=2., act_layer=nn.SiLU, attn_act_layer=None, resolution=14, use_conv=False, use_pool=False, drop_path=0., ): super().__init__() attn_act_layer = attn_act_layer or act_layer self.attn_downsample = AttentionDownsample( in_dim=in_dim, out_dim=out_dim, key_dim=key_dim, num_heads=num_heads, attn_ratio=attn_ratio, act_layer=attn_act_layer, resolution=resolution, use_conv=use_conv, use_pool=use_pool, ) self.mlp = LevitMlp( out_dim, int(out_dim * mlp_ratio), use_conv=use_conv, act_layer=act_layer ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): x = self.attn_downsample(x) x = x + self.drop_path(self.mlp(x)) return x class LevitBlock(nn.Module): def __init__( self, dim, key_dim, num_heads=8, attn_ratio=4., mlp_ratio=2., resolution=14, use_conv=False, act_layer=nn.SiLU, attn_act_layer=None, drop_path=0., ): super().__init__() attn_act_layer = attn_act_layer or act_layer self.attn = Attention( dim=dim, key_dim=key_dim, num_heads=num_heads, attn_ratio=attn_ratio, resolution=resolution, use_conv=use_conv, act_layer=attn_act_layer, ) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.mlp = LevitMlp( dim, int(dim * mlp_ratio), use_conv=use_conv, act_layer=act_layer ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.attn(x)) x = x + self.drop_path2(self.mlp(x)) return x class LevitStage(nn.Module): def __init__( self, in_dim, out_dim, key_dim, depth=4, num_heads=8, attn_ratio=4.0, mlp_ratio=4.0, act_layer=nn.SiLU, attn_act_layer=None, resolution=14, downsample='', use_conv=False, drop_path=0., ): super().__init__() resolution = to_2tuple(resolution) if downsample: self.downsample = LevitDownsample( in_dim, out_dim, key_dim=key_dim, num_heads=in_dim // key_dim, attn_ratio=4., mlp_ratio=2., act_layer=act_layer, attn_act_layer=attn_act_layer, resolution=resolution, use_conv=use_conv, drop_path=drop_path, ) resolution = [(r - 1) // 2 + 1 for r in resolution] else: assert in_dim == out_dim self.downsample = nn.Identity() blocks = [] for _ in range(depth): blocks += [LevitBlock( out_dim, key_dim, num_heads=num_heads, attn_ratio=attn_ratio, mlp_ratio=mlp_ratio, act_layer=act_layer, attn_act_layer=attn_act_layer, resolution=resolution, use_conv=use_conv, drop_path=drop_path, )] self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class Levit(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage NOTE: distillation is defaulted to True since pretrained weights use it, will cause problems w/ train scripts that don't take tuple outputs, """ def __init__( self, img_size=224, in_chans=3, num_classes=1000, embed_dim=(192,), key_dim=64, depth=(12,), num_heads=(3,), attn_ratio=2., mlp_ratio=2., stem_backbone=None, stem_stride=None, stem_type='s16', down_op='subsample', act_layer='hard_swish', attn_act_layer=None, use_conv=False, global_pool='avg', drop_rate=0., drop_path_rate=0.): super().__init__() act_layer = get_act_layer(act_layer) attn_act_layer = get_act_layer(attn_act_layer or act_layer) self.use_conv = use_conv self.num_classes = num_classes self.global_pool = global_pool self.num_features = embed_dim[-1] self.embed_dim = embed_dim self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] num_stages = len(embed_dim) assert len(depth) == num_stages num_heads = to_ntuple(num_stages)(num_heads) attn_ratio = to_ntuple(num_stages)(attn_ratio) mlp_ratio = to_ntuple(num_stages)(mlp_ratio) if stem_backbone is not None: assert stem_stride >= 2 self.stem = stem_backbone stride = stem_stride else: assert stem_type in ('s16', 's8') if stem_type == 's16': self.stem = Stem16(in_chans, embed_dim[0], act_layer=act_layer) else: self.stem = Stem8(in_chans, embed_dim[0], act_layer=act_layer) stride = self.stem.stride resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))]) in_dim = embed_dim[0] stages = [] for i in range(num_stages): stage_stride = 2 if i > 0 else 1 stages += [LevitStage( in_dim, embed_dim[i], key_dim, depth=depth[i], num_heads=num_heads[i], attn_ratio=attn_ratio[i], mlp_ratio=mlp_ratio[i], act_layer=act_layer, attn_act_layer=attn_act_layer, resolution=resolution, use_conv=use_conv, downsample=down_op if stage_stride == 2 else '', drop_path=drop_path_rate )] stride *= stage_stride resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution]) self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')] in_dim = embed_dim[i] self.stages = nn.Sequential(*stages) # Classifier head self.head = NormLinear(embed_dim[-1], num_classes, drop=drop_rate) if num_classes > 0 else nn.Identity() @torch.jit.ignore def no_weight_decay(self): return {x for x in self.state_dict().keys() if 
'attention_biases' in x} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None, distillation=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = NormLinear( self.embed_dim[-1], num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.stem(x) if not self.use_conv: x = x.flatten(2).transpose(1, 2) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x class LevitDistilled(Levit): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.head_dist = NormLinear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity() self.distilled_training = False # must set this True to train w/ distillation token @torch.jit.ignore def get_classifier(self): return self.head, self.head_dist def reset_classifier(self, num_classes, global_pool=None, distillation=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = NormLinear( self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity() self.head_dist = NormLinear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1) if pre_logits: return x x, x_dist = self.head(x), self.head_dist(x) if self.distilled_training and self.training and not torch.jit.is_scripting(): # only return separate classification predictions when training in distilled mode return x, x_dist else: # during standard train/finetune, inference average the classifier predictions return (x + x_dist) / 2 def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict: state_dict = state_dict['model'] # filter out attn biases, should not have been persistent state_dict = {k: v for k, v in state_dict.items() if 'attention_bias_idxs' not in k} D = model.state_dict() out_dict = {} for ka, kb, va, vb in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()): if va.ndim == 4 and vb.ndim == 2: vb = vb[:, :, None, None] if va.shape != vb.shape: # head or first-conv shapes may change for fine-tune assert 'head' in ka or 'stem.conv1.linear' in ka out_dict[ka] = vb return out_dict model_cfgs = dict( levit_128s=dict( embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)), levit_128=dict( embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)), levit_192=dict( embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)), levit_256=dict( embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)), levit_384=dict( embed_dim=(384, 512, 
768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)), # stride-8 stem experiments levit_384_s8=dict( embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4), act_layer='silu', stem_type='s8'), levit_512_s8=dict( embed_dim=(512, 640, 896), key_dim=64, num_heads=(8, 10, 14), depth=(4, 4, 4), act_layer='silu', stem_type='s8'), # wider experiments levit_512=dict( embed_dim=(512, 768, 1024), key_dim=64, num_heads=(8, 12, 16), depth=(4, 4, 4), act_layer='silu'), # deeper experiments levit_256d=dict( embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 8, 6), act_layer='silu'), levit_512d=dict( embed_dim=(512, 640, 768), key_dim=64, num_heads=(8, 10, 12), depth=(4, 8, 6), act_layer='silu'), ) def create_levit(variant, cfg_variant=None, pretrained=False, distilled=True, **kwargs): is_conv = '_conv' in variant out_indices = kwargs.pop('out_indices', (0, 1, 2)) if kwargs.get('features_only', None): if not is_conv: raise RuntimeError('features_only not implemented for LeVit in non-convolutional mode.') if cfg_variant is None: if variant in model_cfgs: cfg_variant = variant elif is_conv: cfg_variant = variant.replace('_conv', '') model_cfg = dict(model_cfgs[cfg_variant], **kwargs) model = build_model_with_cfg( LevitDistilled if distilled else Levit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **model_cfg, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.linear', 'classifier': ('head.linear', 'head_dist.linear'), **kwargs } default_cfgs = generate_default_cfgs({ # weights in nn.Linear mode 'levit_128s.fb_dist_in1k': _cfg( hf_hub_id='timm/', ), 'levit_128.fb_dist_in1k': _cfg( hf_hub_id='timm/', ), 'levit_192.fb_dist_in1k': _cfg( hf_hub_id='timm/', ), 'levit_256.fb_dist_in1k': _cfg( hf_hub_id='timm/', ), 'levit_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', ), # weights in nn.Conv2d mode 'levit_conv_128s.fb_dist_in1k': _cfg( hf_hub_id='timm/', pool_size=(4, 4), ), 'levit_conv_128.fb_dist_in1k': _cfg( hf_hub_id='timm/', pool_size=(4, 4), ), 'levit_conv_192.fb_dist_in1k': _cfg( hf_hub_id='timm/', pool_size=(4, 4), ), 'levit_conv_256.fb_dist_in1k': _cfg( hf_hub_id='timm/', pool_size=(4, 4), ), 'levit_conv_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', pool_size=(4, 4), ), 'levit_384_s8.untrained': _cfg(classifier='head.linear'), 'levit_512_s8.untrained': _cfg(classifier='head.linear'), 'levit_512.untrained': _cfg(classifier='head.linear'), 'levit_256d.untrained': _cfg(classifier='head.linear'), 'levit_512d.untrained': _cfg(classifier='head.linear'), 'levit_conv_384_s8.untrained': _cfg(classifier='head.linear'), 'levit_conv_512_s8.untrained': _cfg(classifier='head.linear'), 'levit_conv_512.untrained': _cfg(classifier='head.linear'), 'levit_conv_256d.untrained': _cfg(classifier='head.linear'), 'levit_conv_512d.untrained': _cfg(classifier='head.linear'), }) @register_model def levit_128s(pretrained=False, **kwargs) -> Levit: return create_levit('levit_128s', pretrained=pretrained, **kwargs) @register_model def levit_128(pretrained=False, **kwargs) -> Levit: return create_levit('levit_128', pretrained=pretrained, **kwargs) @register_model def levit_192(pretrained=False, **kwargs) -> Levit: return create_levit('levit_192', pretrained=pretrained, **kwargs) 
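# --- Illustrative sketch, not part of the upstream file ---
# Shows how the create_levit factory defined above is typically exercised. 'levit_128s' is one of
# the variants configured in model_cfgs above; the 224x224 input size follows the fixed_input_size
# default_cfgs in this module. num_classes=10 and the helper name are assumptions for the example.
def _example_levit_forward():
    model = create_levit('levit_128s', pretrained=False, num_classes=10)
    model.eval()
    x = torch.randn(1, 3, 224, 224)  # LeViT configs assume a fixed 224x224 input
    with torch.no_grad():
        logits = model(x)  # LevitDistilled averages head and head_dist predictions in eval mode
    return logits.shape  # expected: torch.Size([1, 10])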
@register_model def levit_256(pretrained=False, **kwargs) -> Levit: return create_levit('levit_256', pretrained=pretrained, **kwargs) @register_model def levit_384(pretrained=False, **kwargs) -> Levit: return create_levit('levit_384', pretrained=pretrained, **kwargs) @register_model def levit_384_s8(pretrained=False, **kwargs) -> Levit: return create_levit('levit_384_s8', pretrained=pretrained, **kwargs) @register_model def levit_512_s8(pretrained=False, **kwargs) -> Levit: return create_levit('levit_512_s8', pretrained=pretrained, distilled=False, **kwargs) @register_model def levit_512(pretrained=False, **kwargs) -> Levit: return create_levit('levit_512', pretrained=pretrained, distilled=False, **kwargs) @register_model def levit_256d(pretrained=False, **kwargs) -> Levit: return create_levit('levit_256d', pretrained=pretrained, distilled=False, **kwargs) @register_model def levit_512d(pretrained=False, **kwargs) -> Levit: return create_levit('levit_512d', pretrained=pretrained, distilled=False, **kwargs) @register_model def levit_conv_128s(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_128s', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_128(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_128', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_192(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_192', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_256(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_256', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_384(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_384', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_384_s8(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_384_s8', pretrained=pretrained, use_conv=True, **kwargs) @register_model def levit_conv_512_s8(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_512_s8', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) @register_model def levit_conv_512(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_512', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) @register_model def levit_conv_256d(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_256d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs) @register_model def levit_conv_512d(pretrained=False, **kwargs) -> Levit: return create_levit('levit_conv_512d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs)
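# --- Illustrative usage sketch, appended for clarity; not part of the upstream file ---
# Contrasts the linear and conv variants registered above: features_only extraction is only wired
# up for the 'levit_conv_*' models (create_levit raises for the non-conv mode), while the plain
# variants flatten to (B, N, C) tokens internally and return classification logits. The helper
# name and the 224x224 input are assumptions based on the default configs in this module.
def _example_levit_modes():
    import timm
    cls_model = timm.create_model('levit_192', pretrained=False).eval()
    feat_model = timm.create_model('levit_conv_192', pretrained=False, features_only=True).eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = cls_model(x)         # (1, 1000) classification logits
        feature_maps = feat_model(x)  # list of NCHW feature maps, one per stage (out_indices 0, 1, 2)
    return logits.shape, [f.shape for f in feature_maps]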
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/maxxvit.py
""" MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch. 99% of the implementation was done from papers, however last minute some adjustments were made based on the (as yet unfinished?) public code release https://github.com/google-research/maxvit There are multiple sets of models defined for both architectures. Typically, names with a `_rw` suffix are my own original configs prior to referencing https://github.com/google-research/maxvit. These configs work well and appear to be a bit faster / lower resource than the paper. The models without extra prefix / suffix' (coatnet_0_224, maxvit_tiny_224, etc), are intended to match paper, BUT, without any official pretrained weights it's difficult to confirm a 100% match. Papers: MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697 @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803 @article{DBLP:journals/corr/abs-2106-04803, author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan}, title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes}, journal = {CoRR}, volume = {abs/2106.04803}, year = {2021} } Hacked together by / Copyright 2022, Ross Wightman """ import math from collections import OrderedDict from dataclasses import dataclass, replace, field from functools import partial from typing import Callable, Optional, Union, Tuple, List import torch from torch import nn from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, ConvMlp, DropPath, LayerNorm, ClassifierHead, NormMlpClassifierHead from timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d, create_pool2d from timm.layers import trunc_normal_tf_, to_2tuple, extend_tuple, make_divisible, _assert from timm.layers import RelPosMlp, RelPosBias, RelPosBiasTf, use_fused_attn, resize_rel_pos_bias_table from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit'] @dataclass class MaxxVitTransformerCfg: dim_head: int = 32 head_first: bool = True # head ordering in qkv channel dim expand_ratio: float = 4.0 expand_first: bool = True shortcut_bias: bool = True attn_bias: bool = True attn_drop: float = 0. proj_drop: float = 0. 
pool_type: str = 'avg2' rel_pos_type: str = 'bias' rel_pos_dim: int = 512 # for relative position types w/ MLP partition_ratio: int = 32 window_size: Optional[Tuple[int, int]] = None grid_size: Optional[Tuple[int, int]] = None no_block_attn: bool = False # disable window block attention for maxvit (ie only grid) use_nchw_attn: bool = False # for MaxViT variants (not used for CoAt), keep tensors in NCHW order init_values: Optional[float] = None act_layer: str = 'gelu' norm_layer: str = 'layernorm2d' norm_layer_cl: str = 'layernorm' norm_eps: float = 1e-6 def __post_init__(self): if self.grid_size is not None: self.grid_size = to_2tuple(self.grid_size) if self.window_size is not None: self.window_size = to_2tuple(self.window_size) if self.grid_size is None: self.grid_size = self.window_size @dataclass class MaxxVitConvCfg: block_type: str = 'mbconv' expand_ratio: float = 4.0 expand_output: bool = True # calculate expansion channels from output (vs input chs) kernel_size: int = 3 group_size: int = 1 # 1 == depthwise pre_norm_act: bool = False # activation after pre-norm output_bias: bool = True # bias for shortcut + final 1x1 projection conv stride_mode: str = 'dw' # stride done via one of 'pool', '1x1', 'dw' pool_type: str = 'avg2' downsample_pool_type: str = 'avg2' padding: str = '' attn_early: bool = False # apply attn between conv2 and norm2, instead of after norm2 attn_layer: str = 'se' attn_act_layer: str = 'silu' attn_ratio: float = 0.25 init_values: Optional[float] = 1e-6 # for ConvNeXt block, ignored by MBConv act_layer: str = 'gelu' norm_layer: str = '' norm_layer_cl: str = '' norm_eps: Optional[float] = None def __post_init__(self): # mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args assert self.block_type in ('mbconv', 'convnext') use_mbconv = self.block_type == 'mbconv' if not self.norm_layer: self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d' if not self.norm_layer_cl and not use_mbconv: self.norm_layer_cl = 'layernorm' if self.norm_eps is None: self.norm_eps = 1e-5 if use_mbconv else 1e-6 self.downsample_pool_type = self.downsample_pool_type or self.pool_type @dataclass class MaxxVitCfg: embed_dim: Tuple[int, ...] = (96, 192, 384, 768) depths: Tuple[int, ...] = (2, 3, 5, 2) block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T') stem_width: Union[int, Tuple[int, int]] = 64 stem_bias: bool = False conv_cfg: MaxxVitConvCfg = field(default_factory=MaxxVitConvCfg) transformer_cfg: MaxxVitTransformerCfg = field(default_factory=MaxxVitTransformerCfg) head_hidden_size: int = None weight_init: str = 'vit_eff' class Attention2d(nn.Module): fused_attn: Final[bool] """ multi-head attention for 2D NCHW tensors""" def __init__( self, dim: int, dim_out: Optional[int] = None, dim_head: int = 32, bias: bool = True, expand_first: bool = True, head_first: bool = True, rel_pos_cls: Callable = None, attn_drop: float = 0., proj_drop: float = 0. 
): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first else dim self.num_heads = dim_attn // dim_head self.dim_head = dim_head self.head_first = head_first self.scale = dim_head ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias) self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): B, C, H, W = x.shape if self.head_first: q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2) else: q, k, v = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1) if self.fused_attn: attn_bias = None if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos x = torch.nn.functional.scaled_dot_product_attention( q.transpose(-1, -2).contiguous(), k.transpose(-1, -2).contiguous(), v.transpose(-1, -2).contiguous(), attn_mask=attn_bias, dropout_p=self.attn_drop.p, ).transpose(-1, -2).reshape(B, -1, H, W) else: q = q * self.scale attn = q.transpose(-2, -1) @ k if self.rel_pos is not None: attn = self.rel_pos(attn) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W) x = self.proj(x) x = self.proj_drop(x) return x class AttentionCl(nn.Module): """ Channels-last multi-head attention (B, ..., C) """ fused_attn: Final[bool] def __init__( self, dim: int, dim_out: Optional[int] = None, dim_head: int = 32, bias: bool = True, expand_first: bool = True, head_first: bool = True, rel_pos_cls: Callable = None, attn_drop: float = 0., proj_drop: float = 0. 
): super().__init__() dim_out = dim_out or dim dim_attn = dim_out if expand_first and dim_out > dim else dim assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim' self.num_heads = dim_attn // dim_head self.dim_head = dim_head self.head_first = head_first self.scale = dim_head ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias) self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim_attn, dim_out, bias=bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): B = x.shape[0] restore_shape = x.shape[:-1] if self.head_first: q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3) else: q, k, v = self.qkv(x).reshape(B, -1, 3, self.num_heads, self.dim_head).transpose(1, 3).unbind(2) if self.fused_attn: attn_bias = None if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p, ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if self.rel_pos is not None: attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(restore_shape + (-1,)) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma return x.mul_(gamma) if self.inplace else x * gamma class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class Downsample2d(nn.Module): """ A downsample pooling module supporting several maxpool and avgpool modes * 'max' - MaxPool2d w/ kernel_size 3, stride 2, padding 1 * 'max2' - MaxPool2d w/ kernel_size = stride = 2 * 'avg' - AvgPool2d w/ kernel_size 3, stride 2, padding 1 * 'avg2' - AvgPool2d w/ kernel_size = stride = 2 """ def __init__( self, dim: int, dim_out: int, pool_type: str = 'avg2', padding: str = '', bias: bool = True, ): super().__init__() assert pool_type in ('max', 'max2', 'avg', 'avg2') if pool_type == 'max': self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=padding or 1) elif pool_type == 'max2': self.pool = create_pool2d('max', 2, padding=padding or 0) # kernel_size == stride == 2 elif pool_type == 'avg': self.pool = create_pool2d( 'avg', kernel_size=3, stride=2, count_include_pad=False, padding=padding or 1) else: self.pool = create_pool2d('avg', 2, padding=padding or 0) if dim != dim_out: self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) else: self.expand = nn.Identity() def forward(self, x): x = self.pool(x) # spatial downsample x = self.expand(x) # expand chs return x def _init_transformer(module, name, scheme=''): if isinstance(module, (nn.Conv2d, nn.Linear)): if scheme == 'normal': nn.init.normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'trunc_normal': 
trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'xavier_normal': nn.init.xavier_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: # vit like nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) class TransformerBlock2d(nn.Module): """ Transformer block with 2D downsampling '2D' NCHW tensor layout Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW for spatial pooling, the benefit is minimal so ended up using just this variant for CoAt configs. This impl was faster on TPU w/ PT XLA than the 1D experiment. """ def __init__( self, dim: int, dim_out: int, stride: int = 1, rel_pos_cls: Callable = None, cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) act_layer = get_act_layer(cfg.act_layer) if stride == 2: self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias) self.norm1 = nn.Sequential(OrderedDict([ ('norm', norm_layer(dim)), ('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)), ])) else: assert dim == dim_out self.shortcut = nn.Identity() self.norm1 = norm_layer(dim) self.attn = Attention2d( dim, dim_out, dim_head=cfg.dim_head, expand_first=cfg.expand_first, bias=cfg.attn_bias, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop ) self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim_out) self.mlp = ConvMlp( in_features=dim_out, hidden_features=int(dim_out * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x def _init_conv(module, name, scheme=''): if isinstance(module, nn.Conv2d): if scheme == 'normal': nn.init.normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'trunc_normal': trunc_normal_tf_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif scheme == 'xavier_normal': nn.init.xavier_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: # efficientnet like fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out)) if module.bias is not None: nn.init.zeros_(module.bias) def num_groups(group_size, channels): if not group_size: # 0 or None return 1 # normal conv with 1 group else: # NOTE group_size == 1 -> depthwise conv assert channels % group_size == 0 return channels // group_size class MbConvBlock(nn.Module): """ Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand) """ def __init__( self, in_chs: int, out_chs: int, stride: int = 1, dilation: Tuple[int, int] = (1, 1), cfg: MaxxVitConvCfg = MaxxVitConvCfg(), drop_path: float = 0. ): super(MbConvBlock, self).__init__() norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps) mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio) groups = num_groups(cfg.group_size, mid_chs) if stride == 2: self.shortcut = Downsample2d( in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias, padding=cfg.padding) else: self.shortcut = nn.Identity() assert cfg.stride_mode in ('pool', '1x1', 'dw') stride_pool, stride_1, stride_2 = 1, 1, 1 if cfg.stride_mode == 'pool': # NOTE this is not described in paper, experiment to find faster option that doesn't stride in 1x1 stride_pool, dilation_2 = stride, dilation[1] # FIXME handle dilation of avg pool elif cfg.stride_mode == '1x1': # NOTE I don't like this option described in paper, 1x1 w/ stride throws info away stride_1, dilation_2 = stride, dilation[1] else: stride_2, dilation_2 = stride, dilation[0] self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act) if stride_pool > 1: self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type, padding=cfg.padding) else: self.down = nn.Identity() self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1) self.norm1 = norm_act_layer(mid_chs) self.conv2_kxk = create_conv2d( mid_chs, mid_chs, cfg.kernel_size, stride=stride_2, dilation=dilation_2, groups=groups, padding=cfg.padding) attn_kwargs = {} if isinstance(cfg.attn_layer, str): if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca': attn_kwargs['act_layer'] = cfg.attn_act_layer attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs)) # two different orderings for SE and norm2 (due to some weights and trials using SE before norm2) if cfg.attn_early: self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs) self.norm2 = norm_act_layer(mid_chs) self.se = None else: self.se_early = None self.norm2 = norm_act_layer(mid_chs) self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs) 
self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def init_weights(self, scheme=''): named_apply(partial(_init_conv, scheme=scheme), self) def forward(self, x): shortcut = self.shortcut(x) x = self.pre_norm(x) x = self.down(x) # 1x1 expansion conv & norm-act x = self.conv1_1x1(x) x = self.norm1(x) # depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act x = self.conv2_kxk(x) if self.se_early is not None: x = self.se_early(x) x = self.norm2(x) if self.se is not None: x = self.se(x) # 1x1 linear projection to output width x = self.conv3_1x1(x) x = self.drop_path(x) + shortcut return x class ConvNeXtBlock(nn.Module): """ ConvNeXt Block """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 7, stride: int = 1, dilation: Tuple[int, int] = (1, 1), cfg: MaxxVitConvCfg = MaxxVitConvCfg(), conv_mlp: bool = True, drop_path: float = 0. ): super().__init__() out_chs = out_chs or in_chs act_layer = get_act_layer(cfg.act_layer) if conv_mlp: norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) mlp_layer = ConvMlp else: assert 'layernorm' in cfg.norm_layer norm_layer = LayerNorm mlp_layer = Mlp self.use_conv_mlp = conv_mlp if stride == 2: self.shortcut = Downsample2d(in_chs, out_chs) elif in_chs != out_chs: self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias) else: self.shortcut = nn.Identity() assert cfg.stride_mode in ('pool', 'dw') stride_pool, stride_dw = 1, 1 # FIXME handle dilation? if cfg.stride_mode == 'pool': stride_pool = stride else: stride_dw = stride if stride_pool == 2: self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type) else: self.down = nn.Identity() self.conv_dw = create_conv2d( in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1], depthwise=True, bias=cfg.output_bias) self.norm = norm_layer(out_chs) self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer) if conv_mlp: self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() else: self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): shortcut = self.shortcut(x) x = self.down(x) x = self.conv_dw(x) if self.use_conv_mlp: x = self.norm(x) x = self.mlp(x) x = self.ls(x) else: x = x.permute(0, 2, 3, 1) x = self.norm(x) x = self.mlp(x) x = self.ls(x) x = x.permute(0, 3, 1, 2) x = self.drop_path(x) + shortcut return x def window_partition(x, window_size: List[int]): B, H, W, C = x.shape _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') _assert(W % window_size[1] == 0, '') x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows, window_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x def grid_partition(x, grid_size: List[int]): B, H, W, C = x.shape _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') _assert(W % grid_size[1] == 0, '') x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C) windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def grid_reverse(windows, grid_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C) x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C) return x def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size): rel_pos_cls = None if cfg.rel_pos_type == 'mlp': rel_pos_cls = partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim) elif cfg.rel_pos_type == 'bias': rel_pos_cls = partial(RelPosBias, window_size=window_size) elif cfg.rel_pos_type == 'bias_tf': rel_pos_cls = partial(RelPosBiasTf, window_size=window_size) return rel_pos_cls class PartitionAttentionCl(nn.Module): """ Grid or Block partition + Attn + FFN. NxC 'channels last' tensor layout. """ def __init__( self, dim: int, partition_type: str = 'block', cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last act_layer = get_act_layer(cfg.act_layer) self.partition_block = partition_type == 'block' self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn = AttentionCl( dim, dim, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def _partition_attn(self, x): img_size = x.shape[1:3] if self.partition_block: partitioned = window_partition(x, self.partition_size) else: partitioned = grid_partition(x, self.partition_size) partitioned = self.attn(partitioned) if self.partition_block: x = window_reverse(partitioned, self.partition_size, img_size) else: x = grid_reverse(partitioned, self.partition_size, img_size) return x def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ParallelPartitionAttention(nn.Module): """ Experimental. Grid and Block partition + single FFN NxC tensor layout. """ def __init__( self, dim: int, cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() assert dim % 2 == 0 norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps) # NOTE this block is channels-last act_layer = get_act_layer(cfg.act_layer) assert cfg.window_size == cfg.grid_size self.partition_size = to_2tuple(cfg.window_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn_block = AttentionCl( dim, dim // 2, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.attn_grid = AttentionCl( dim, dim // 2, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * cfg.expand_ratio), out_features=dim, act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def _partition_attn(self, x): img_size = x.shape[1:3] partitioned_block = window_partition(x, self.partition_size) partitioned_block = self.attn_block(partitioned_block) x_window = window_reverse(partitioned_block, self.partition_size, img_size) partitioned_grid = grid_partition(x, self.partition_size) partitioned_grid = self.attn_grid(partitioned_grid) x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size) return torch.cat([x_window, x_grid], dim=-1) def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x def window_partition_nchw(x, window_size: List[int]): B, C, H, W = x.shape _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})') _assert(W % window_size[1] == 0, '') x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1]) windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1]) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[1] x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1]) x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W) return x def grid_partition_nchw(x, grid_size: List[int]): B, C, H, W = x.shape _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}') _assert(W % grid_size[1] == 0, '') x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1]) windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1]) return windows @register_notrace_function # reason: int argument is a Proxy def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]): H, W = img_size C = windows.shape[1] x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1]) x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W) return x class PartitionAttention2d(nn.Module): """ Grid or Block partition + Attn + FFN '2D' NCHW tensor layout. """ def __init__( self, dim: int, partition_type: str = 'block', cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) # NOTE this block is channels-last act_layer = get_act_layer(cfg.act_layer) self.partition_block = partition_type == 'block' self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size) rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size) self.norm1 = norm_layer(dim) self.attn = Attention2d( dim, dim, dim_head=cfg.dim_head, bias=cfg.attn_bias, head_first=cfg.head_first, rel_pos_cls=rel_pos_cls, attn_drop=cfg.attn_drop, proj_drop=cfg.proj_drop, ) self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = ConvMlp( in_features=dim, hidden_features=int(dim * cfg.expand_ratio), act_layer=act_layer, drop=cfg.proj_drop) self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def _partition_attn(self, x): img_size = x.shape[-2:] if self.partition_block: partitioned = window_partition_nchw(x, self.partition_size) else: partitioned = grid_partition_nchw(x, self.partition_size) partitioned = self.attn(partitioned) if self.partition_block: x = window_reverse_nchw(partitioned, self.partition_size, img_size) else: x = grid_reverse_nchw(partitioned, self.partition_size, img_size) return x def forward(self, x): x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class MaxxVitBlock(nn.Module): """ MaxVit conv, window partition + FFN , grid partition + FFN """ def __init__( self, dim: int, dim_out: int, stride: int = 1, conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path: float = 0., ): super().__init__() self.nchw_attn = transformer_cfg.use_nchw_attn conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) partition_layer = PartitionAttention2d if self.nchw_attn else PartitionAttentionCl self.attn_block = None if transformer_cfg.no_block_attn else partition_layer(**attn_kwargs) self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs) def init_weights(self, scheme=''): if self.attn_block is not None: named_apply(partial(_init_transformer, scheme=scheme), self.attn_block) named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): # NCHW format x = self.conv(x) if not self.nchw_attn: x = x.permute(0, 2, 3, 1) # to NHWC (channels-last) if self.attn_block is not None: x = self.attn_block(x) x = self.attn_grid(x) if not self.nchw_attn: x = x.permute(0, 3, 1, 2) # back to NCHW return x class ParallelMaxxVitBlock(nn.Module): """ MaxVit block with parallel cat(window + grid), one FF Experimental timm block. 
""" def __init__( self, dim, dim_out, stride=1, num_conv=2, conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), drop_path=0., ): super().__init__() conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock if num_conv > 1: convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)] convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1) self.conv = nn.Sequential(*convs) else: self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path) self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path) def init_weights(self, scheme=''): named_apply(partial(_init_transformer, scheme=scheme), self.attn) named_apply(partial(_init_conv, scheme=scheme), self.conv) def forward(self, x): x = self.conv(x) x = x.permute(0, 2, 3, 1) x = self.attn(x) x = x.permute(0, 3, 1, 2) return x class MaxxVitStage(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 2, depth: int = 4, feat_size: Tuple[int, int] = (14, 14), block_types: Union[str, Tuple[str]] = 'C', transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(), conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(), drop_path: Union[float, List[float]] = 0., ): super().__init__() self.grad_checkpointing = False block_types = extend_tuple(block_types, depth) blocks = [] for i, t in enumerate(block_types): block_stride = stride if i == 0 else 1 assert t in ('C', 'T', 'M', 'PM') if t == 'C': conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock blocks += [conv_cls( in_chs, out_chs, stride=block_stride, cfg=conv_cfg, drop_path=drop_path[i], )] elif t == 'T': rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size) blocks += [TransformerBlock2d( in_chs, out_chs, stride=block_stride, rel_pos_cls=rel_pos_cls, cfg=transformer_cfg, drop_path=drop_path[i], )] elif t == 'M': blocks += [MaxxVitBlock( in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i], )] elif t == 'PM': blocks += [ParallelMaxxVitBlock( in_chs, out_chs, stride=block_stride, conv_cfg=conv_cfg, transformer_cfg=transformer_cfg, drop_path=drop_path[i], )] in_chs = out_chs self.blocks = nn.Sequential(*blocks) def forward(self, x): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class Stem(nn.Module): def __init__( self, in_chs: int, out_chs: int, kernel_size: int = 3, padding: str = '', bias: bool = False, act_layer: str = 'gelu', norm_layer: str = 'batchnorm2d', norm_eps: float = 1e-5, ): super().__init__() if not isinstance(out_chs, (list, tuple)): out_chs = to_2tuple(out_chs) norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps) self.out_chs = out_chs[-1] self.stride = 2 self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2, padding=padding, bias=bias) self.norm1 = norm_act_layer(out_chs[0]) self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1, padding=padding, bias=bias) def init_weights(self, scheme=''): named_apply(partial(_init_conv, scheme=scheme), self) def forward(self, x): x = self.conv1(x) x = self.norm1(x) x = self.conv2(x) return x def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]): if cfg.window_size is not None: assert cfg.grid_size return cfg partition_size = img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio cfg 
= replace(cfg, window_size=partition_size, grid_size=partition_size) return cfg def _overlay_kwargs(cfg: MaxxVitCfg, **kwargs): transformer_kwargs = {} conv_kwargs = {} base_kwargs = {} for k, v in kwargs.items(): if k.startswith('transformer_'): transformer_kwargs[k.replace('transformer_', '')] = v elif k.startswith('conv_'): conv_kwargs[k.replace('conv_', '')] = v else: base_kwargs[k] = v cfg = replace( cfg, transformer_cfg=replace(cfg.transformer_cfg, **transformer_kwargs), conv_cfg=replace(cfg.conv_cfg, **conv_kwargs), **base_kwargs ) return cfg class MaxxVit(nn.Module): """ CoaTNet + MaxVit base model. Highly configurable for different block compositions, tensor layouts, pooling types. """ def __init__( self, cfg: MaxxVitCfg, img_size: Union[int, Tuple[int, int]] = 224, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', drop_rate: float = 0., drop_path_rate: float = 0., **kwargs, ): super().__init__() img_size = to_2tuple(img_size) if kwargs: cfg = _overlay_kwargs(cfg, **kwargs) transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size) self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = cfg.embed_dim[-1] self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] self.stem = Stem( in_chs=in_chans, out_chs=cfg.stem_width, padding=cfg.conv_cfg.padding, bias=cfg.stem_bias, act_layer=cfg.conv_cfg.act_layer, norm_layer=cfg.conv_cfg.norm_layer, norm_eps=cfg.conv_cfg.norm_eps, ) stride = self.stem.stride self.feature_info += [dict(num_chs=self.stem.out_chs, reduction=2, module='stem')] feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))]) num_stages = len(cfg.embed_dim) assert len(cfg.depths) == num_stages dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] in_chs = self.stem.out_chs stages = [] for i in range(num_stages): stage_stride = 2 out_chs = cfg.embed_dim[i] feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size]) stages += [MaxxVitStage( in_chs, out_chs, depth=cfg.depths[i], block_types=cfg.block_type[i], conv_cfg=cfg.conv_cfg, transformer_cfg=transformer_cfg, feat_size=feat_size, drop_path=dpr[i], )] stride *= stage_stride in_chs = out_chs self.feature_info += [dict(num_chs=out_chs, reduction=stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) final_norm_layer = partial(get_norm_layer(cfg.transformer_cfg.norm_layer), eps=cfg.transformer_cfg.norm_eps) self.head_hidden_size = cfg.head_hidden_size if self.head_hidden_size: self.norm = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, hidden_size=self.head_hidden_size, pool_type=global_pool, drop_rate=drop_rate, norm_layer=final_norm_layer, ) else: # standard classifier head w/ norm, pooling, fc classifier self.norm = final_norm_layer(self.num_features) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) # Weight init (default PyTorch init works well for AdamW if scheme not set) assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff') if cfg.weight_init: named_apply(partial(self._init_weights, scheme=cfg.weight_init), self) def _init_weights(self, module, name, scheme=''): if hasattr(module, 'init_weights'): try: module.init_weights(scheme=scheme) except TypeError: module.init_weights() @torch.jit.ignore def no_weight_decay(self): return { k for k, _ in self.named_parameters() if any(n in k for n in ["relative_position_bias_table", 
"rel_pos.mlp"])} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', # stem and embed blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _rw_coat_cfg( stride_mode='pool', pool_type='avg2', conv_output_bias=False, conv_attn_early=False, conv_attn_act_layer='relu', conv_norm_layer='', transformer_shortcut_bias=True, transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', init_values=None, rel_pos_type='bias', rel_pos_dim=512, ): # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit # Common differences for initial timm models: # - pre-norm layer in MZBConv included an activation after norm # - mbconv expansion calculated from input instead of output chs # - mbconv shortcut and final 1x1 conv did not have a bias # - SE act layer was relu, not silu # - mbconv uses silu in timm, not gelu # - expansion in attention block done via output proj, not input proj # Variable differences (evolved over training initial models): # - avg pool with kernel_size=2 favoured downsampling (instead of maxpool for coat) # - SE attention was between conv2 and norm/act # - default to avg pool for mbconv downsample instead of 1x1 or dw conv # - transformer block shortcut has no bias return dict( conv_cfg=MaxxVitConvCfg( stride_mode=stride_mode, pool_type=pool_type, pre_norm_act=True, expand_output=False, output_bias=conv_output_bias, attn_early=conv_attn_early, attn_act_layer=conv_attn_act_layer, act_layer='silu', norm_layer=conv_norm_layer, ), transformer_cfg=MaxxVitTransformerCfg( expand_first=False, shortcut_bias=transformer_shortcut_bias, pool_type=pool_type, init_values=init_values, norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim, ), ) def _rw_max_cfg( stride_mode='dw', pool_type='avg2', conv_output_bias=False, conv_attn_ratio=1 / 16, conv_norm_layer='', transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', window_size=None, dim_head=32, init_values=None, rel_pos_type='bias', rel_pos_dim=512, ): # 'RW' timm variant models were created and trained before seeing https://github.com/google-research/maxvit # Differences of initial timm models: # - mbconv expansion calculated from input instead of output chs # - mbconv shortcut and final 1x1 conv did not have a bias # - mbconv uses silu in timm, not gelu # - expansion in attention block done via output proj, not input proj return dict( conv_cfg=MaxxVitConvCfg( stride_mode=stride_mode, pool_type=pool_type, expand_output=False, output_bias=conv_output_bias, attn_ratio=conv_attn_ratio, act_layer='silu', norm_layer=conv_norm_layer, ), transformer_cfg=MaxxVitTransformerCfg( expand_first=False, pool_type=pool_type, dim_head=dim_head, window_size=window_size, init_values=init_values, norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, 
rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim, ), ) def _next_cfg( stride_mode='dw', pool_type='avg2', conv_norm_layer='layernorm2d', conv_norm_layer_cl='layernorm', transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', window_size=None, no_block_attn=False, init_values=1e-6, rel_pos_type='mlp', # MLP by default for maxxvit rel_pos_dim=512, ): # For experimental models with convnext instead of mbconv init_values = to_2tuple(init_values) return dict( conv_cfg=MaxxVitConvCfg( block_type='convnext', stride_mode=stride_mode, pool_type=pool_type, expand_output=False, init_values=init_values[0], norm_layer=conv_norm_layer, norm_layer_cl=conv_norm_layer_cl, ), transformer_cfg=MaxxVitTransformerCfg( expand_first=False, pool_type=pool_type, window_size=window_size, no_block_attn=no_block_attn, # enabled for MaxxViT-V2 init_values=init_values[1], norm_layer=transformer_norm_layer, norm_layer_cl=transformer_norm_layer_cl, rel_pos_type=rel_pos_type, rel_pos_dim=rel_pos_dim, ), ) def _tf_cfg(): return dict( conv_cfg=MaxxVitConvCfg( norm_eps=1e-3, act_layer='gelu_tanh', padding='same', ), transformer_cfg=MaxxVitTransformerCfg( norm_eps=1e-5, act_layer='gelu_tanh', head_first=False, # heads are interleaved (q_nh, q_hdim, k_nh, q_hdim, ....) rel_pos_type='bias_tf', ), ) model_cfgs = dict( # timm specific CoAtNet configs coatnet_pico_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 3, 5, 2), stem_width=(32, 64), **_rw_max_cfg( # using newer max defaults here conv_output_bias=True, conv_attn_ratio=0.25, ), ), coatnet_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), **_rw_max_cfg( # using newer max defaults here stride_mode='pool', conv_output_bias=True, conv_attn_ratio=0.25, ), ), coatnet_0_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), # deeper than paper '0' model stem_width=(32, 64), **_rw_coat_cfg( conv_attn_early=True, transformer_shortcut_bias=False, ), ), coatnet_1_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', conv_attn_early=True, transformer_shortcut_bias=False, ) ), coatnet_2_rw=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=(64, 128), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', #init_values=1e-6, ), ), coatnet_3_rw=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=(96, 192), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, ), ), # Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos) coatnet_bn_0_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), # deeper than paper '0' model stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', conv_attn_early=True, transformer_shortcut_bias=False, transformer_norm_layer='batchnorm2d', ) ), coatnet_rmlp_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), **_rw_max_cfg( conv_output_bias=True, conv_attn_ratio=0.25, rel_pos_type='mlp', rel_pos_dim=384, ), ), coatnet_rmlp_0_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 7, 2), # deeper than paper '0' model stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', rel_pos_type='mlp', ), ), coatnet_rmlp_1_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg( pool_type='max', conv_attn_early=True, transformer_shortcut_bias=False, rel_pos_type='mlp', rel_pos_dim=384, # was supposed to be 
512, woops ), ), coatnet_rmlp_1_rw2=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=(32, 64), **_rw_coat_cfg( stride_mode='dw', rel_pos_type='mlp', rel_pos_dim=512, # was supposed to be 512, woops ), ), coatnet_rmlp_2_rw=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=(64, 128), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, rel_pos_type='mlp' ), ), coatnet_rmlp_3_rw=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=(96, 192), **_rw_coat_cfg( stride_mode='dw', conv_attn_act_layer='silu', init_values=1e-6, rel_pos_type='mlp' ), ), coatnet_nano_cc=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), block_type=('C', 'C', ('C', 'T'), ('C', 'T')), **_rw_coat_cfg(), ), coatnext_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(3, 4, 6, 3), stem_width=(32, 64), weight_init='normal', **_next_cfg( rel_pos_type='bias', init_values=(1e-5, None) ), ), # Trying to be like the CoAtNet paper configs coatnet_0=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 3, 5, 2), stem_width=64, head_hidden_size=768, ), coatnet_1=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), stem_width=64, head_hidden_size=768, ), coatnet_2=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), stem_width=128, head_hidden_size=1024, ), coatnet_3=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), stem_width=192, head_hidden_size=1536, ), coatnet_4=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 12, 28, 2), stem_width=192, head_hidden_size=1536, ), coatnet_5=MaxxVitCfg( embed_dim=(256, 512, 1280, 2048), depths=(2, 12, 28, 2), stem_width=192, head_hidden_size=2048, ), # Experimental MaxVit configs maxvit_pico_rw=MaxxVitCfg( embed_dim=(32, 64, 128, 256), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(24, 32), **_rw_max_cfg(), ), maxvit_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxvit_tiny_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxvit_tiny_pm=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('PM',) * 4, stem_width=(32, 64), **_rw_max_cfg(), ), maxvit_rmlp_pico_rw=MaxxVitCfg( embed_dim=(32, 64, 128, 256), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(24, 32), **_rw_max_cfg(rel_pos_type='mlp'), ), maxvit_rmlp_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(rel_pos_type='mlp'), ), maxvit_rmlp_tiny_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg(rel_pos_type='mlp'), ), maxvit_rmlp_small_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), **_rw_max_cfg( rel_pos_type='mlp', init_values=1e-6, ), ), maxvit_rmlp_base_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=(32, 64), head_hidden_size=768, **_rw_max_cfg( rel_pos_type='mlp', ), ), maxxvit_rmlp_nano_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(32, 64), weight_init='normal', **_next_cfg(), ), maxxvit_rmlp_tiny_rw=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(32, 64), 
**_next_cfg(), ), maxxvit_rmlp_small_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=(48, 96), **_next_cfg(), ), maxxvitv2_nano_rw=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(1, 2, 3, 1), block_type=('M',) * 4, stem_width=(48, 96), weight_init='normal', **_next_cfg( no_block_attn=True, rel_pos_type='bias', ), ), maxxvitv2_rmlp_base_rw=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 12, 2), block_type=('M',) * 4, stem_width=(64, 128), **_next_cfg( no_block_attn=True, ), ), maxxvitv2_rmlp_large_rw=MaxxVitCfg( embed_dim=(160, 320, 640, 1280), depths=(2, 6, 16, 2), block_type=('M',) * 4, stem_width=(80, 160), head_hidden_size=1280, **_next_cfg( no_block_attn=True, ), ), # Trying to be like the MaxViT paper configs maxvit_tiny_tf=MaxxVitCfg( embed_dim=(64, 128, 256, 512), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=64, stem_bias=True, head_hidden_size=512, **_tf_cfg(), ), maxvit_small_tf=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 2, 5, 2), block_type=('M',) * 4, stem_width=64, stem_bias=True, head_hidden_size=768, **_tf_cfg(), ), maxvit_base_tf=MaxxVitCfg( embed_dim=(96, 192, 384, 768), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=64, stem_bias=True, head_hidden_size=768, **_tf_cfg(), ), maxvit_large_tf=MaxxVitCfg( embed_dim=(128, 256, 512, 1024), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=128, stem_bias=True, head_hidden_size=1024, **_tf_cfg(), ), maxvit_xlarge_tf=MaxxVitCfg( embed_dim=(192, 384, 768, 1536), depths=(2, 6, 14, 2), block_type=('M',) * 4, stem_width=192, stem_bias=True, head_hidden_size=1536, **_tf_cfg(), ), ) def checkpoint_filter_fn(state_dict, model: nn.Module): model_state_dict = model.state_dict() out_dict = {} for k, v in state_dict.items(): if k.endswith('relative_position_bias_table'): m = model.get_submodule(k[:-29]) if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]: v = resize_rel_pos_bias_table( v, new_window_size=m.window_size, new_bias_shape=m.relative_position_bias_table.shape, ) if k in model_state_dict and v.ndim != model_state_dict[k].ndim and v.numel() == model_state_dict[k].numel(): # adapt between conv2d / linear layers assert v.ndim in (2, 4) v = v.reshape(model_state_dict[k].shape) out_dict[k] = v return out_dict def _create_maxxvit(variant, cfg_variant=None, pretrained=False, **kwargs): if cfg_variant is None: if variant in model_cfgs: cfg_variant = variant else: cfg_variant = '_'.join(variant.split('_')[:-1]) return build_model_with_cfg( MaxxVit, variant, pretrained, model_cfg=model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=checkpoint_filter_fn, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'stem.conv1', 'classifier': 'head.fc', 'fixed_input_size': True, **kwargs } default_cfgs = generate_default_cfgs({ # timm specific CoAtNet configs, ImageNet-1k pretrain, fixed rel-pos 'coatnet_pico_rw_224.untrained': _cfg(url=''), 'coatnet_nano_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth', crop_pct=0.9), 'coatnet_0_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'), 'coatnet_1_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth' ), # timm specific CoAtNet configs, ImageNet-12k pretrain w/ 1k fine-tune, fixed rel-pos 'coatnet_2_rw_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/'), #'coatnet_3_rw_224.untrained': _cfg(url=''), # Experimental CoAtNet configs w/ ImageNet-12k pretrain -> 1k fine-tune (different norm layers, MLP rel-pos) 'coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/'), 'coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/'), 'coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), # Experimental CoAtNet configs w/ ImageNet-1k train (different norm layers, MLP rel-pos) 'coatnet_bn_0_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, crop_pct=0.95), 'coatnet_rmlp_nano_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth', crop_pct=0.9), 'coatnet_rmlp_0_rw_224.untrained': _cfg(url=''), 'coatnet_rmlp_1_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'), 'coatnet_rmlp_2_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'), 'coatnet_rmlp_3_rw_224.untrained': _cfg(url=''), 'coatnet_nano_cc_224.untrained': _cfg(url=''), 'coatnext_nano_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth', crop_pct=0.9), # ImagenNet-12k pretrain CoAtNet 'coatnet_2_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), 'coatnet_3_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), 'coatnet_rmlp_1_rw2_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), 'coatnet_rmlp_2_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), # Trying to be like the CoAtNet paper configs (will adapt if 'tf' weights are ever released) 'coatnet_0_224.untrained': _cfg(url=''), 'coatnet_1_224.untrained': _cfg(url=''), 'coatnet_2_224.untrained': _cfg(url=''), 'coatnet_3_224.untrained': _cfg(url=''), 'coatnet_4_224.untrained': _cfg(url=''), 'coatnet_5_224.untrained': _cfg(url=''), # timm specific MaxVit configs, ImageNet-1k pretrain or untrained 'maxvit_pico_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_nano_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_tiny_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'), 'maxvit_tiny_rw_256.untrained': _cfg( url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_tiny_pm_256.untrained': 
_cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), # timm specific MaxVit w/ MLP rel-pos, ImageNet-1k pretrain 'maxvit_rmlp_pico_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_nano_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_tiny_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxvit_rmlp_small_rw_224.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth', crop_pct=0.9, ), 'maxvit_rmlp_small_rw_256.untrained': _cfg( url='', input_size=(3, 256, 256), pool_size=(8, 8)), # timm specific MaxVit w/ ImageNet-12k pretrain and 1k fine-tune 'maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', ), 'maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), # timm specific MaxVit w/ ImageNet-12k pretrain 'maxvit_rmlp_base_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, ), # timm MaxxViT configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks) 'maxxvit_rmlp_nano_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvit_rmlp_tiny_rw_256.untrained': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvit_rmlp_small_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth', input_size=(3, 256, 256), pool_size=(8, 8)), # timm MaxxViT-V2 configs (ConvNeXt conv blocks mixed with MaxVit transformer blocks, more width, no block attn) 'maxxvitv2_nano_rw_256.sw_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), 'maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/'), 'maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxxvitv2_rmlp_large_rw_224.untrained': _cfg(url=''), 'maxxvitv2_rmlp_base_rw_224.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), # MaxViT models ported from official Tensorflow impl 'maxvit_tiny_tf_224.in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_tiny_tf_384.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_tiny_tf_512.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_small_tf_224.in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_small_tf_384.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_small_tf_512.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), 
pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_224.in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_base_tf_384.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_512.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_224.in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'maxvit_large_tf_384.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_512.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_224.in21k': _cfg( hf_hub_id='timm/', num_classes=21843), 'maxvit_base_tf_384.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_base_tf_512.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_224.in21k': _cfg( hf_hub_id='timm/', num_classes=21843), 'maxvit_large_tf_384.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_large_tf_512.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), crop_pct=1.0, crop_mode='squash'), 'maxvit_xlarge_tf_224.in21k': _cfg( hf_hub_id='timm/', num_classes=21843), 'maxvit_xlarge_tf_384.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, crop_mode='squash'), 'maxvit_xlarge_tf_512.in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 512, 512), pool_size=(16, 16), crop_pct=1.0, crop_mode='squash'), }) @register_model def coatnet_pico_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_pico_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_nano_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_0_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_1_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_1_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_2_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_2_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_3_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_3_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_bn_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_bn_0_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_nano_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_0_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_0_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_1_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_1_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_1_rw2_224(pretrained=False, **kwargs) -> MaxxVit: return 
_create_maxxvit('coatnet_rmlp_1_rw2_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_2_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_2_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_2_rw_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_2_rw_384', pretrained=pretrained, **kwargs) @register_model def coatnet_rmlp_3_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_rmlp_3_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_nano_cc_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_nano_cc_224', pretrained=pretrained, **kwargs) @register_model def coatnext_nano_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnext_nano_rw_224', pretrained=pretrained, **kwargs) @register_model def coatnet_0_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_0_224', pretrained=pretrained, **kwargs) @register_model def coatnet_1_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_1_224', pretrained=pretrained, **kwargs) @register_model def coatnet_2_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_2_224', pretrained=pretrained, **kwargs) @register_model def coatnet_3_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_3_224', pretrained=pretrained, **kwargs) @register_model def coatnet_4_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_4_224', pretrained=pretrained, **kwargs) @register_model def coatnet_5_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('coatnet_5_224', pretrained=pretrained, **kwargs) @register_model def maxvit_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_pico_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_pico_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_pico_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_small_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_base_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_rmlp_base_rw_384', 
pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_pm_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_pm_256', pretrained=pretrained, **kwargs) @register_model def maxxvit_rmlp_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvit_rmlp_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_nano_rw_256(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_nano_rw_256', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_rmlp_base_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_rmlp_base_rw_224', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_rmlp_base_rw_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_rmlp_base_rw_384', pretrained=pretrained, **kwargs) @register_model def maxxvitv2_rmlp_large_rw_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxxvitv2_rmlp_large_rw_224', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_tf_224', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_tf_384', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_tiny_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_tiny_tf_512', 'maxvit_tiny_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_small_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_small_tf_224', 'maxvit_small_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_small_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_small_tf_384', 'maxvit_small_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_small_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_small_tf_512', 'maxvit_small_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_base_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_base_tf_224', 'maxvit_base_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_base_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_base_tf_384', 'maxvit_base_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_base_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_base_tf_512', 'maxvit_base_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_large_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_large_tf_224', 'maxvit_large_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_large_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_large_tf_384', 'maxvit_large_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_large_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_large_tf_512', 'maxvit_large_tf', pretrained=pretrained, **kwargs) @register_model def 
maxvit_xlarge_tf_224(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_xlarge_tf_224', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_xlarge_tf_384(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_xlarge_tf_384', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs) @register_model def maxvit_xlarge_tf_512(pretrained=False, **kwargs) -> MaxxVit: return _create_maxxvit('maxvit_xlarge_tf_512', 'maxvit_xlarge_tf', pretrained=pretrained, **kwargs)
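

# The registered variants above are ordinary timm entrypoints. A minimal usage
# sketch, assuming only the public timm API (timm.create_model plus the model's
# forward / forward_features methods defined earlier in this module); the
# variant name is just one of the configs registered above.
if __name__ == '__main__':
    import torch
    import timm

    # Build an untrained nano variant; pretrained=True would instead pull the
    # hf-hub weights referenced in default_cfgs above.
    model = timm.create_model('maxvit_nano_rw_256', pretrained=False).eval()
    # These cfgs set fixed_input_size=True, so the input must match the cfg size.
    x = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        logits = model(x)                    # (1, 1000) classification logits
        feats = model.forward_features(x)    # final-stage NCHW feature map
    print(logits.shape, feats.shape)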
hf_public_repos/pytorch-image-models/timm/models/metaformer.py
""" Poolformer from MetaFormer is Actually What You Need for Vision https://arxiv.org/abs/2111.11418 IdentityFormer, RandFormer, PoolFormerV2, ConvFormer, and CAFormer from MetaFormer Baselines for Vision https://arxiv.org/abs/2210.13452 All implemented models support feature extraction and variable input resolution. Original implementation by Weihao Yu et al., adapted for timm by Fredo Guan and Ross Wightman. Adapted from https://github.com/sail-sg/metaformer, original copyright below """ # Copyright 2022 Garena Online Private Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_, DropPath, SelectAdaptivePool2d, GroupNorm1, LayerNorm, LayerNorm2d, Mlp, \ use_fused_attn from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['MetaFormer'] class Stem(nn.Module): """ Stem implemented by a layer of convolution. Conv2d params constant across all models. """ def __init__( self, in_channels, out_channels, norm_layer=None, ): super().__init__() self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=7, stride=4, padding=2 ) self.norm = norm_layer(out_channels) if norm_layer else nn.Identity() def forward(self, x): x = self.conv(x) x = self.norm(x) return x class Downsampling(nn.Module): """ Downsampling implemented by a layer of convolution. """ def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, norm_layer=None, ): super().__init__() self.norm = norm_layer(in_channels) if norm_layer else nn.Identity() self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding ) def forward(self, x): x = self.norm(x) x = self.conv(x) return x class Scale(nn.Module): """ Scale vector by element multiplications. 
""" def __init__(self, dim, init_value=1.0, trainable=True, use_nchw=True): super().__init__() self.shape = (dim, 1, 1) if use_nchw else (dim,) self.scale = nn.Parameter(init_value * torch.ones(dim), requires_grad=trainable) def forward(self, x): return x * self.scale.view(self.shape) class SquaredReLU(nn.Module): """ Squared ReLU: https://arxiv.org/abs/2109.08668 """ def __init__(self, inplace=False): super().__init__() self.relu = nn.ReLU(inplace=inplace) def forward(self, x): return torch.square(self.relu(x)) class StarReLU(nn.Module): """ StarReLU: s * relu(x) ** 2 + b """ def __init__( self, scale_value=1.0, bias_value=0.0, scale_learnable=True, bias_learnable=True, mode=None, inplace=False ): super().__init__() self.inplace = inplace self.relu = nn.ReLU(inplace=inplace) self.scale = nn.Parameter(scale_value * torch.ones(1), requires_grad=scale_learnable) self.bias = nn.Parameter(bias_value * torch.ones(1), requires_grad=bias_learnable) def forward(self, x): return self.scale * self.relu(x) ** 2 + self.bias class Attention(nn.Module): """ Vanilla self-attention from Transformer: https://arxiv.org/abs/1706.03762. Modified from timm. """ fused_attn: Final[bool] def __init__( self, dim, head_dim=32, num_heads=None, qkv_bias=False, attn_drop=0., proj_drop=0., proj_bias=False, **kwargs ): super().__init__() self.head_dim = head_dim self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.num_heads = num_heads if num_heads else dim // head_dim if self.num_heads == 0: self.num_heads = 1 self.attention_dim = self.num_heads * self.head_dim self.qkv = nn.Linear(dim, self.attention_dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(self.attention_dim, dim, bias=proj_bias) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p, ) else: attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x # custom norm modules that disable the bias term, since the original models defs # used a custom norm with a weight term but no bias term. class GroupNorm1NoBias(GroupNorm1): def __init__(self, num_channels, **kwargs): super().__init__(num_channels, **kwargs) self.eps = kwargs.get('eps', 1e-6) self.bias = None class LayerNorm2dNoBias(LayerNorm2d): def __init__(self, num_channels, **kwargs): super().__init__(num_channels, **kwargs) self.eps = kwargs.get('eps', 1e-6) self.bias = None class LayerNormNoBias(nn.LayerNorm): def __init__(self, num_channels, **kwargs): super().__init__(num_channels, **kwargs) self.eps = kwargs.get('eps', 1e-6) self.bias = None class SepConv(nn.Module): r""" Inverted separable convolution from MobileNetV2: https://arxiv.org/abs/1801.04381. 
""" def __init__( self, dim, expansion_ratio=2, act1_layer=StarReLU, act2_layer=nn.Identity, bias=False, kernel_size=7, padding=3, **kwargs ): super().__init__() mid_channels = int(expansion_ratio * dim) self.pwconv1 = nn.Conv2d(dim, mid_channels, kernel_size=1, bias=bias) self.act1 = act1_layer() self.dwconv = nn.Conv2d( mid_channels, mid_channels, kernel_size=kernel_size, padding=padding, groups=mid_channels, bias=bias) # depthwise conv self.act2 = act2_layer() self.pwconv2 = nn.Conv2d(mid_channels, dim, kernel_size=1, bias=bias) def forward(self, x): x = self.pwconv1(x) x = self.act1(x) x = self.dwconv(x) x = self.act2(x) x = self.pwconv2(x) return x class Pooling(nn.Module): """ Implementation of pooling for PoolFormer: https://arxiv.org/abs/2111.11418 """ def __init__(self, pool_size=3, **kwargs): super().__init__() self.pool = nn.AvgPool2d( pool_size, stride=1, padding=pool_size // 2, count_include_pad=False) def forward(self, x): y = self.pool(x) return y - x class MlpHead(nn.Module): """ MLP classification head """ def __init__( self, dim, num_classes=1000, mlp_ratio=4, act_layer=SquaredReLU, norm_layer=LayerNorm, drop_rate=0., bias=True ): super().__init__() hidden_features = int(mlp_ratio * dim) self.fc1 = nn.Linear(dim, hidden_features, bias=bias) self.act = act_layer() self.norm = norm_layer(hidden_features) self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias) self.head_drop = nn.Dropout(drop_rate) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.norm(x) x = self.head_drop(x) x = self.fc2(x) return x class MetaFormerBlock(nn.Module): """ Implementation of one MetaFormer block. """ def __init__( self, dim, token_mixer=Pooling, mlp_act=StarReLU, mlp_bias=False, norm_layer=LayerNorm2d, proj_drop=0., drop_path=0., use_nchw=True, layer_scale_init_value=None, res_scale_init_value=None, **kwargs ): super().__init__() ls_layer = partial(Scale, dim=dim, init_value=layer_scale_init_value, use_nchw=use_nchw) rs_layer = partial(Scale, dim=dim, init_value=res_scale_init_value, use_nchw=use_nchw) self.norm1 = norm_layer(dim) self.token_mixer = token_mixer(dim=dim, proj_drop=proj_drop, **kwargs) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.layer_scale1 = ls_layer() if layer_scale_init_value is not None else nn.Identity() self.res_scale1 = rs_layer() if res_scale_init_value is not None else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( dim, int(4 * dim), act_layer=mlp_act, bias=mlp_bias, drop=proj_drop, use_conv=use_nchw, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.layer_scale2 = ls_layer() if layer_scale_init_value is not None else nn.Identity() self.res_scale2 = rs_layer() if res_scale_init_value is not None else nn.Identity() def forward(self, x): x = self.res_scale1(x) + \ self.layer_scale1( self.drop_path1( self.token_mixer(self.norm1(x)) ) ) x = self.res_scale2(x) + \ self.layer_scale2( self.drop_path2( self.mlp(self.norm2(x)) ) ) return x class MetaFormerStage(nn.Module): def __init__( self, in_chs, out_chs, depth=2, token_mixer=nn.Identity, mlp_act=StarReLU, mlp_bias=False, downsample_norm=LayerNorm2d, norm_layer=LayerNorm2d, proj_drop=0., dp_rates=[0.] 
* 2, layer_scale_init_value=None, res_scale_init_value=None, **kwargs, ): super().__init__() self.grad_checkpointing = False self.use_nchw = not issubclass(token_mixer, Attention) # don't downsample if in_chs and out_chs are the same self.downsample = nn.Identity() if in_chs == out_chs else Downsampling( in_chs, out_chs, kernel_size=3, stride=2, padding=1, norm_layer=downsample_norm, ) self.blocks = nn.Sequential(*[MetaFormerBlock( dim=out_chs, token_mixer=token_mixer, mlp_act=mlp_act, mlp_bias=mlp_bias, norm_layer=norm_layer, proj_drop=proj_drop, drop_path=dp_rates[i], layer_scale_init_value=layer_scale_init_value, res_scale_init_value=res_scale_init_value, use_nchw=self.use_nchw, **kwargs, ) for i in range(depth)]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x: Tensor): x = self.downsample(x) B, C, H, W = x.shape if not self.use_nchw: x = x.reshape(B, C, -1).transpose(1, 2) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) if not self.use_nchw: x = x.transpose(1, 2).reshape(B, C, H, W) return x class MetaFormer(nn.Module): r""" MetaFormer A PyTorch impl of : `MetaFormer Baselines for Vision` - https://arxiv.org/abs/2210.13452 Args: in_chans (int): Number of input image channels. num_classes (int): Number of classes for classification head. global_pool: Pooling for classifier head. depths (list or tuple): Number of blocks at each stage. dims (list or tuple): Feature dimension at each stage. token_mixers (list, tuple or token_fcn): Token mixer for each stage. mlp_act: Activation layer for MLP. mlp_bias (boolean): Enable or disable mlp bias term. drop_path_rate (float): Stochastic depth rate. drop_rate (float): Dropout rate. layer_scale_init_values (list, tuple, float or None): Init value for Layer Scale. None means not use the layer scale. Form: https://arxiv.org/abs/2103.17239. res_scale_init_values (list, tuple, float or None): Init value for res Scale on residual connections. None means not use the res scale. From: https://arxiv.org/abs/2110.09456. downsample_norm (nn.Module): Norm layer used in stem and downsampling layers. norm_layers (list, tuple or norm_fcn): Norm layers for each stage. output_norm: Norm layer before classifier head. use_mlp_head: Use MLP classification head. 
""" def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', depths=(2, 2, 6, 2), dims=(64, 128, 320, 512), token_mixers=Pooling, mlp_act=StarReLU, mlp_bias=False, drop_path_rate=0., proj_drop_rate=0., drop_rate=0.0, layer_scale_init_values=None, res_scale_init_values=(None, None, 1.0, 1.0), downsample_norm=LayerNorm2dNoBias, norm_layers=LayerNorm2dNoBias, output_norm=LayerNorm2d, use_mlp_head=True, **kwargs, ): super().__init__() self.num_classes = num_classes self.num_features = dims[-1] self.drop_rate = drop_rate self.use_mlp_head = use_mlp_head self.num_stages = len(depths) # convert everything to lists if they aren't indexable if not isinstance(depths, (list, tuple)): depths = [depths] # it means the model has only one stage if not isinstance(dims, (list, tuple)): dims = [dims] if not isinstance(token_mixers, (list, tuple)): token_mixers = [token_mixers] * self.num_stages if not isinstance(norm_layers, (list, tuple)): norm_layers = [norm_layers] * self.num_stages if not isinstance(layer_scale_init_values, (list, tuple)): layer_scale_init_values = [layer_scale_init_values] * self.num_stages if not isinstance(res_scale_init_values, (list, tuple)): res_scale_init_values = [res_scale_init_values] * self.num_stages self.grad_checkpointing = False self.feature_info = [] self.stem = Stem( in_chans, dims[0], norm_layer=downsample_norm ) stages = [] prev_dim = dims[0] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] for i in range(self.num_stages): stages += [MetaFormerStage( prev_dim, dims[i], depth=depths[i], token_mixer=token_mixers[i], mlp_act=mlp_act, mlp_bias=mlp_bias, proj_drop=proj_drop_rate, dp_rates=dp_rates[i], layer_scale_init_value=layer_scale_init_values[i], res_scale_init_value=res_scale_init_values[i], downsample_norm=downsample_norm, norm_layer=norm_layers[i], **kwargs, )] prev_dim = dims[i] self.feature_info += [dict(num_chs=dims[i], reduction=2, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) # if using MlpHead, dropout is handled by MlpHead if num_classes > 0: if self.use_mlp_head: final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate) else: final = nn.Linear(self.num_features, num_classes) else: final = nn.Identity() self.head = nn.Sequential(OrderedDict([ ('global_pool', SelectAdaptivePool2d(pool_type=global_pool)), ('norm', output_norm(self.num_features)), ('flatten', nn.Flatten(1) if global_pool else nn.Identity()), ('drop', nn.Dropout(drop_rate) if self.use_mlp_head else nn.Identity()), ('fc', final) ])) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, (nn.Conv2d, nn.Linear)): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable for stage in self.stages: stage.set_grad_checkpointing(enable=enable) @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes=0, global_pool=None): if global_pool is not None: self.head.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.head.flatten = nn.Flatten(1) if global_pool else nn.Identity() if num_classes > 0: if self.use_mlp_head: final = MlpHead(self.num_features, num_classes, drop_rate=self.drop_rate) else: final = nn.Linear(self.num_features, num_classes) else: final = nn.Identity() self.head.fc = final def forward_head(self, x: Tensor, pre_logits: bool = False): # NOTE nn.Sequential in head broken down since can't call head[:-1](x) 
        #  in torchscript :(
        x = self.head.global_pool(x)
        x = self.head.norm(x)
        x = self.head.flatten(x)
        x = self.head.drop(x)
        return x if pre_logits else self.head.fc(x)

    def forward_features(self, x: Tensor):
        x = self.stem(x)
        if self.grad_checkpointing and not torch.jit.is_scripting():
            x = checkpoint_seq(self.stages, x)
        else:
            x = self.stages(x)
        return x

    def forward(self, x: Tensor):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


# this works but it's long and breaks backwards compatibility with weights from the poolformer-only impl
def checkpoint_filter_fn(state_dict, model):
    if 'stem.conv.weight' in state_dict:
        return state_dict

    import re
    out_dict = {}
    is_poolformerv1 = 'network.0.0.mlp.fc1.weight' in state_dict
    model_state_dict = model.state_dict()
    for k, v in state_dict.items():
        if is_poolformerv1:
            k = re.sub(r'layer_scale_([0-9]+)', r'layer_scale\1.scale', k)
            k = k.replace('network.1', 'downsample_layers.1')
            k = k.replace('network.3', 'downsample_layers.2')
            k = k.replace('network.5', 'downsample_layers.3')
            k = k.replace('network.2', 'network.1')
            k = k.replace('network.4', 'network.2')
            k = k.replace('network.6', 'network.3')
            k = k.replace('network', 'stages')
        k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k)
        k = k.replace('downsample.proj', 'downsample.conv')
        k = k.replace('patch_embed.proj', 'patch_embed.conv')
        k = re.sub(r'([0-9]+).([0-9]+)', r'\1.blocks.\2', k)
        k = k.replace('stages.0.downsample', 'patch_embed')
        k = k.replace('patch_embed', 'stem')
        k = k.replace('post_norm', 'norm')
        k = k.replace('pre_norm', 'norm')
        k = re.sub(r'^head', 'head.fc', k)
        k = re.sub(r'^norm', 'head.norm', k)
        # compare shapes (not the tensor itself) before attempting a reshape
        if v.shape != model_state_dict[k].shape and v.numel() == model_state_dict[k].numel():
            v = v.reshape(model_state_dict[k].shape)
        out_dict[k] = v
    return out_dict


def _create_metaformer(variant, pretrained=False, **kwargs):
    default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (2, 2, 6, 2))))
    out_indices = kwargs.pop('out_indices', default_out_indices)
    model = build_model_with_cfg(
        MetaFormer,
        variant,
        pretrained,
        pretrained_filter_fn=checkpoint_filter_fn,
        feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
        **kwargs,
    )
    return model


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000,
        'input_size': (3, 224, 224),
        'pool_size': (7, 7),
        'crop_pct': 1.0,
        'interpolation': 'bicubic',
        'mean': IMAGENET_DEFAULT_MEAN,
        'std': IMAGENET_DEFAULT_STD,
        'classifier': 'head.fc',
        'first_conv': 'stem.conv',
        **kwargs
    }


default_cfgs = generate_default_cfgs({
    'poolformer_s12.sail_in1k': _cfg(
        hf_hub_id='timm/',
        crop_pct=0.9),
    'poolformer_s24.sail_in1k': _cfg(
        hf_hub_id='timm/',
        crop_pct=0.9),
    'poolformer_s36.sail_in1k': _cfg(
        hf_hub_id='timm/',
        crop_pct=0.9),
    'poolformer_m36.sail_in1k': _cfg(
        hf_hub_id='timm/',
        crop_pct=0.95),
    'poolformer_m48.sail_in1k': _cfg(
        hf_hub_id='timm/',
        crop_pct=0.95),

    'poolformerv2_s12.sail_in1k': _cfg(hf_hub_id='timm/'),
    'poolformerv2_s24.sail_in1k': _cfg(hf_hub_id='timm/'),
    'poolformerv2_s36.sail_in1k': _cfg(hf_hub_id='timm/'),
    'poolformerv2_m36.sail_in1k': _cfg(hf_hub_id='timm/'),
    'poolformerv2_m48.sail_in1k': _cfg(hf_hub_id='timm/'),

    'convformer_s18.sail_in1k': _cfg(
        hf_hub_id='timm/',
        classifier='head.fc.fc2'),
    'convformer_s18.sail_in1k_384': _cfg(
        hf_hub_id='timm/',
        classifier='head.fc.fc2',
        input_size=(3, 384, 384), pool_size=(12, 12)),
    'convformer_s18.sail_in22k_ft_in1k': _cfg(
        hf_hub_id='timm/',
        classifier='head.fc.fc2'),
    'convformer_s18.sail_in22k_ft_in1k_384': _cfg(
        hf_hub_id='timm/',
        classifier='head.fc.fc2',
input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_s18.sail_in22k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'convformer_s36.sail_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_s36.sail_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_s36.sail_in22k_ft_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_s36.sail_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_s36.sail_in22k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'convformer_m36.sail_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_m36.sail_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_m36.sail_in22k_ft_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_m36.sail_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_m36.sail_in22k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'convformer_b36.sail_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_b36.sail_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_b36.sail_in22k_ft_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'convformer_b36.sail_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'convformer_b36.sail_in22k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'caformer_s18.sail_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_s18.sail_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_s18.sail_in22k_ft_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_s18.sail_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_s18.sail_in22k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'caformer_s36.sail_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_s36.sail_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_s36.sail_in22k_ft_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_s36.sail_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_s36.sail_in22k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'caformer_m36.sail_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_m36.sail_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_m36.sail_in22k_ft_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_m36.sail_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_m36.sail_in22k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), 'caformer_b36.sail_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_b36.sail_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 
'caformer_b36.sail_in22k_ft_in1k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2'), 'caformer_b36.sail_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', input_size=(3, 384, 384), pool_size=(12, 12)), 'caformer_b36.sail_in22k': _cfg( hf_hub_id='timm/', classifier='head.fc.fc2', num_classes=21841), }) @register_model def poolformer_s12(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[2, 2, 6, 2], dims=[64, 128, 320, 512], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-5, res_scale_init_values=None, use_mlp_head=False, **kwargs) return _create_metaformer('poolformer_s12', pretrained=pretrained, **model_kwargs) @register_model def poolformer_s24(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[4, 4, 12, 4], dims=[64, 128, 320, 512], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-5, res_scale_init_values=None, use_mlp_head=False, **kwargs) return _create_metaformer('poolformer_s24', pretrained=pretrained, **model_kwargs) @register_model def poolformer_s36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[6, 6, 18, 6], dims=[64, 128, 320, 512], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-6, res_scale_init_values=None, use_mlp_head=False, **kwargs) return _create_metaformer('poolformer_s36', pretrained=pretrained, **model_kwargs) @register_model def poolformer_m36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[6, 6, 18, 6], dims=[96, 192, 384, 768], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-6, res_scale_init_values=None, use_mlp_head=False, **kwargs) return _create_metaformer('poolformer_m36', pretrained=pretrained, **model_kwargs) @register_model def poolformer_m48(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[8, 8, 24, 8], dims=[96, 192, 384, 768], downsample_norm=None, mlp_act=nn.GELU, mlp_bias=True, norm_layers=GroupNorm1, layer_scale_init_values=1e-6, res_scale_init_values=None, use_mlp_head=False, **kwargs) return _create_metaformer('poolformer_m48', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_s12(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[2, 2, 6, 2], dims=[64, 128, 320, 512], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_s12', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_s24(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[4, 4, 12, 4], dims=[64, 128, 320, 512], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_s24', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_s36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[6, 6, 18, 6], dims=[64, 128, 320, 512], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_s36', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_m36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[6, 6, 18, 6], dims=[96, 192, 384, 768], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_m36', pretrained=pretrained, **model_kwargs) @register_model def poolformerv2_m48(pretrained=False, **kwargs) -> 
MetaFormer: model_kwargs = dict( depths=[8, 8, 24, 8], dims=[96, 192, 384, 768], norm_layers=GroupNorm1NoBias, use_mlp_head=False, **kwargs) return _create_metaformer('poolformerv2_m48', pretrained=pretrained, **model_kwargs) @register_model def convformer_s18(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, norm_layers=LayerNorm2dNoBias, **kwargs) return _create_metaformer('convformer_s18', pretrained=pretrained, **model_kwargs) @register_model def convformer_s36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=SepConv, norm_layers=LayerNorm2dNoBias, **kwargs) return _create_metaformer('convformer_s36', pretrained=pretrained, **model_kwargs) @register_model def convformer_m36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=SepConv, norm_layers=LayerNorm2dNoBias, **kwargs) return _create_metaformer('convformer_m36', pretrained=pretrained, **model_kwargs) @register_model def convformer_b36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=SepConv, norm_layers=LayerNorm2dNoBias, **kwargs) return _create_metaformer('convformer_b36', pretrained=pretrained, **model_kwargs) @register_model def caformer_s18(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[3, 3, 9, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, **kwargs) return _create_metaformer('caformer_s18', pretrained=pretrained, **model_kwargs) @register_model def caformer_s36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[3, 12, 18, 3], dims=[64, 128, 320, 512], token_mixers=[SepConv, SepConv, Attention, Attention], norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, **kwargs) return _create_metaformer('caformer_s36', pretrained=pretrained, **model_kwargs) @register_model def caformer_m36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[3, 12, 18, 3], dims=[96, 192, 384, 576], token_mixers=[SepConv, SepConv, Attention, Attention], norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, **kwargs) return _create_metaformer('caformer_m36', pretrained=pretrained, **model_kwargs) @register_model def caformer_b36(pretrained=False, **kwargs) -> MetaFormer: model_kwargs = dict( depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=[SepConv, SepConv, Attention, Attention], norm_layers=[LayerNorm2dNoBias] * 2 + [LayerNormNoBias] * 2, **kwargs) return _create_metaformer('caformer_b36', pretrained=pretrained, **model_kwargs)
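

# The `token_mixers` argument is the MetaFormer extension point: any module
# constructor accepting (dim, proj_drop, **kwargs) can stand in for Pooling,
# SepConv, or Attention in the scaffold above. A minimal sketch, assuming only
# the classes defined in this module (torch / nn come from the module-level
# imports); `IdentityMixer` is an illustrative name echoing the IdentityFormer
# baseline from the module docstring, not an upstream entrypoint.
if __name__ == '__main__':
    class IdentityMixer(nn.Module):
        """ Degenerate token mixer: performs no token mixing at all. """
        def __init__(self, dim=None, proj_drop=0., **kwargs):
            super().__init__()

        def forward(self, x):
            return x

    model = MetaFormer(
        depths=(2, 2, 6, 2),
        dims=(64, 128, 320, 512),
        token_mixers=IdentityMixer,  # replicated across stages when not a list/tuple
        num_classes=10,
    ).eval()
    with torch.no_grad():
        out = model(torch.randn(2, 3, 224, 224))
    print(out.shape)  # torch.Size([2, 10])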
hf_public_repos/pytorch-image-models/timm/models/mlp_mixer.py
""" MLP-Mixer, ResMLP, and gMLP in PyTorch This impl originally based on MLP-Mixer paper. Official JAX impl: https://github.com/google-research/vision_transformer/blob/linen/vit_jax/models_mixer.py Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 @article{tolstikhin2021, title={MLP-Mixer: An all-MLP Architecture for Vision}, author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, Thomas and Yung, Jessica and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey}, journal={arXiv preprint arXiv:2105.01601}, year={2021} } Also supporting ResMlp, and a preliminary (not verified) implementations of gMLP Code: https://github.com/facebookresearch/deit Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 @misc{touvron2021resmlp, title={ResMLP: Feedforward networks for image classification with data-efficient training}, author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and Edouard Grave and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Hervé Jégou}, year={2021}, eprint={2105.03404}, } Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 @misc{liu2021pay, title={Pay Attention to MLPs}, author={Hanxiao Liu and Zihang Dai and David R. So and Quoc V. Le}, year={2021}, eprint={2105.08050}, } A thank you to paper authors for releasing code and weights. Hacked together by / Copyright 2021 Ross Wightman """ import math from functools import partial import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, GluMlp, GatedMlp, DropPath, lecun_normal_, to_2tuple from ._builder import build_model_with_cfg from ._manipulate import named_apply, checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['MixerBlock', 'MlpMixer'] # model_registry will add each entrypoint fn to this class MixerBlock(nn.Module): """ Residual Block w/ token mixing and channel MLPs Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ def __init__( self, dim, seq_len, mlp_ratio=(0.5, 4.0), mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0., ): super().__init__() tokens_dim, channels_dim = [int(x * dim) for x in to_2tuple(mlp_ratio)] self.norm1 = norm_layer(dim) self.mlp_tokens = mlp_layer(seq_len, tokens_dim, act_layer=act_layer, drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) x = x + self.drop_path(self.mlp_channels(self.norm2(x))) return x class Affine(nn.Module): def __init__(self, dim): super().__init__() self.alpha = nn.Parameter(torch.ones((1, 1, dim))) self.beta = nn.Parameter(torch.zeros((1, 1, dim))) def forward(self, x): return torch.addcmul(self.beta, self.alpha, x) class ResBlock(nn.Module): """ Residual MLP block w/ LayerScale and Affine 'norm' Based on: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ def __init__( self, dim, seq_len, mlp_ratio=4, mlp_layer=Mlp, norm_layer=Affine, act_layer=nn.GELU, init_values=1e-4, drop=0., drop_path=0., ): super().__init__() channel_dim = int(dim * mlp_ratio) self.norm1 = norm_layer(dim) self.linear_tokens = nn.Linear(seq_len, seq_len) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, drop=drop) self.ls1 = nn.Parameter(init_values * torch.ones(dim)) self.ls2 = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): x = x + self.drop_path(self.ls1 * self.linear_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)) x = x + self.drop_path(self.ls2 * self.mlp_channels(self.norm2(x))) return x class SpatialGatingUnit(nn.Module): """ Spatial Gating Unit Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ def __init__(self, dim, seq_len, norm_layer=nn.LayerNorm): super().__init__() gate_dim = dim // 2 self.norm = norm_layer(gate_dim) self.proj = nn.Linear(seq_len, seq_len) def init_weights(self): # special init for the projection gate, called as override by base model init nn.init.normal_(self.proj.weight, std=1e-6) nn.init.ones_(self.proj.bias) def forward(self, x): u, v = x.chunk(2, dim=-1) v = self.norm(v) v = self.proj(v.transpose(-1, -2)) return u * v.transpose(-1, -2) class SpatialGatingBlock(nn.Module): """ Residual Block w/ Spatial Gating Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ def __init__( self, dim, seq_len, mlp_ratio=4, mlp_layer=GatedMlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop=0., drop_path=0., ): super().__init__() channel_dim = int(dim * mlp_ratio) self.norm = norm_layer(dim) sgu = partial(SpatialGatingUnit, seq_len=seq_len) self.mlp_channels = mlp_layer(dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): x = x + self.drop_path(self.mlp_channels(self.norm(x))) return x class MlpMixer(nn.Module): def __init__( self, num_classes=1000, img_size=224, in_chans=3, patch_size=16, num_blocks=8, embed_dim=512, mlp_ratio=(0.5, 4.0), block_layer=MixerBlock, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, drop_rate=0., proj_drop_rate=0., drop_path_rate=0., nlhb=False, stem_norm=False, global_pool='avg', ): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.grad_checkpointing = False self.stem = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None, ) # FIXME drop_path (stochastic depth scaling rule or all the same?) self.blocks = nn.Sequential(*[ block_layer( embed_dim, self.stem.num_patches, mlp_ratio, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, drop=proj_drop_rate, drop_path=drop_path_rate, ) for _ in range(num_blocks)]) self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, self.num_classes) if num_classes > 0 else nn.Identity() self.init_weights(nlhb=nlhb) @torch.jit.ignore def init_weights(self, nlhb=False): head_bias = -math.log(self.num_classes) if nlhb else 0. named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=1) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): """ Mixer weight initialization (trying to match Flax defaults) """ if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: if flax: # Flax defaults lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: # like MLP init in vit (my original init) nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): # NOTE if a parent module contains init_weights method, it can override the init of the # child modules as 
this will be called in depth-first order. module.init_weights() def checkpoint_filter_fn(state_dict, model): """ Remap checkpoints if needed """ if 'patch_embed.proj.weight' in state_dict: # Remap FB ResMlp models -> timm out_dict = {} for k, v in state_dict.items(): k = k.replace('patch_embed.', 'stem.') k = k.replace('attn.', 'linear_tokens.') k = k.replace('mlp.', 'mlp_channels.') k = k.replace('gamma_', 'ls') if k.endswith('.alpha') or k.endswith('.beta'): v = v.reshape(1, 1, -1) out_dict[k] = v return out_dict return state_dict def _create_mixer(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for MLP-Mixer models.') model = build_model_with_cfg( MlpMixer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 0.875, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'stem.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'mixer_s32_224.untrained': _cfg(), 'mixer_s16_224.untrained': _cfg(), 'mixer_b32_224.untrained': _cfg(), 'mixer_b16_224.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth', ), 'mixer_b16_224.goog_in21k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth', num_classes=21843 ), 'mixer_l32_224.untrained': _cfg(), 'mixer_l16_224.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224-92f9adc4.pth', ), 'mixer_l16_224.goog_in21k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_l16_224_in21k-846aa33c.pth', num_classes=21843 ), # Mixer ImageNet-21K-P pretraining 'mixer_b16_224.miil_in21k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil_in21k-2a558a71.pth', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221, ), 'mixer_b16_224.miil_in21k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mixer_b16_224_miil-9229a591.pth', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', ), 'gmixer_12_224.untrained': _cfg(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'gmixer_24_224.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmixer_24_224_raa-7daf7ae6.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_no_dist.pth', #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resmlp_24_224_raa-a8256759.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_36_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_36_no_dist.pth', 
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_no_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_36_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_36_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_distilled_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_dist.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_big_24_224.fb_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlpB_24_22k.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_12_224.fb_dino': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_12_dino.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'resmlp_24_224.fb_dino': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/resmlp_24_dino.pth', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD), 'gmlp_ti16_224.untrained': _cfg(), 'gmlp_s16_224.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/gmlp_s16_224_raa-10536d42.pth', ), 'gmlp_b16_224.untrained': _cfg(), }) @register_model def mixer_s32_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-S/32 224x224 Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs) model = _create_mixer('mixer_s32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_s16_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-S/16 224x224 Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs) model = _create_mixer('mixer_s16_224', pretrained=pretrained, **model_args) return model @register_model def mixer_b32_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-B/32 224x224 Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs) model = _create_mixer('mixer_b32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_b16_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-B/16 224x224. ImageNet-1k pretrained weights. Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs) model = _create_mixer('mixer_b16_224', pretrained=pretrained, **model_args) return model @register_model def mixer_l32_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-L/32 224x224. 
Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs) model = _create_mixer('mixer_l32_224', pretrained=pretrained, **model_args) return model @register_model def mixer_l16_224(pretrained=False, **kwargs) -> MlpMixer: """ Mixer-L/16 224x224. ImageNet-1k pretrained weights. Paper: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601 """ model_args = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs) model = _create_mixer('mixer_l16_224', pretrained=pretrained, **model_args) return model @register_model def gmixer_12_224(pretrained=False, **kwargs) -> MlpMixer: """ Glu-Mixer-12 224x224 Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer """ model_args = dict( patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=(1.0, 4.0), mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) model = _create_mixer('gmixer_12_224', pretrained=pretrained, **model_args) return model @register_model def gmixer_24_224(pretrained=False, **kwargs) -> MlpMixer: """ Glu-Mixer-24 224x224 Experiment by Ross Wightman, adding SwiGLU to MLP-Mixer """ model_args = dict( patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=(1.0, 4.0), mlp_layer=GluMlp, act_layer=nn.SiLU, **kwargs) model = _create_mixer('gmixer_24_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_12_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-12 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=16, num_blocks=12, embed_dim=384, mlp_ratio=4, block_layer=ResBlock, norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_12_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_24_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-24 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=16, num_blocks=24, embed_dim=384, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-5), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_24_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_36_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-36 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=16, num_blocks=36, embed_dim=384, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_36_224', pretrained=pretrained, **model_args) return model @register_model def resmlp_big_24_224(pretrained=False, **kwargs) -> MlpMixer: """ ResMLP-B-24 Paper: `ResMLP: Feedforward networks for image classification...` - https://arxiv.org/abs/2105.03404 """ model_args = dict( patch_size=8, num_blocks=24, embed_dim=768, mlp_ratio=4, block_layer=partial(ResBlock, init_values=1e-6), norm_layer=Affine, **kwargs) model = _create_mixer('resmlp_big_24_224', pretrained=pretrained, **model_args) return model @register_model def gmlp_ti16_224(pretrained=False, **kwargs) -> MlpMixer: """ gMLP-Tiny Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ model_args = dict( patch_size=16, num_blocks=30, embed_dim=128, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_ti16_224', pretrained=pretrained, **model_args) return model @register_model def 
gmlp_s16_224(pretrained=False, **kwargs) -> MlpMixer: """ gMLP-Small Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ model_args = dict( patch_size=16, num_blocks=30, embed_dim=256, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_s16_224', pretrained=pretrained, **model_args) return model @register_model def gmlp_b16_224(pretrained=False, **kwargs) -> MlpMixer: """ gMLP-Base Paper: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050 """ model_args = dict( patch_size=16, num_blocks=30, embed_dim=512, mlp_ratio=6, block_layer=SpatialGatingBlock, mlp_layer=GatedMlp, **kwargs) model = _create_mixer('gmlp_b16_224', pretrained=pretrained, **model_args) return model register_model_deprecations(__name__, { 'mixer_b16_224_in21k': 'mixer_b16_224.goog_in21k_ft_in1k', 'mixer_l16_224_in21k': 'mixer_l16_224.goog_in21k_ft_in1k', 'mixer_b16_224_miil': 'mixer_b16_224.miil_in21k_ft_in1k', 'mixer_b16_224_miil_in21k': 'mixer_b16_224.miil_in21k', 'resmlp_12_distilled_224': 'resmlp_12_224.fb_distilled_in1k', 'resmlp_24_distilled_224': 'resmlp_24_224.fb_distilled_in1k', 'resmlp_36_distilled_224': 'resmlp_36_224.fb_distilled_in1k', 'resmlp_big_24_distilled_224': 'resmlp_big_24_224.fb_distilled_in1k', 'resmlp_big_24_224_in22ft1k': 'resmlp_big_24_224.fb_in22k_ft_in1k', 'resmlp_12_224_dino': 'resmlp_12_224', 'resmlp_24_224_dino': 'resmlp_24_224', })
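

if __name__ == '__main__':
    # Illustrative usage sketch appended for clarity; it is not part of the original
    # module. Run via `python -m timm.models.mlp_mixer` so the relative imports above
    # resolve. The tiny configuration below is hypothetical (chosen only for a quick
    # shape check); the classes it exercises are the ones defined in this file.
    model = MlpMixer(num_classes=10, patch_size=32, num_blocks=2, embed_dim=64)
    x = torch.randn(2, 3, 224, 224)
    tokens = model.forward_features(x)  # [2, 49, 64] -> 7x7 = 49 patch tokens of dim 64
    logits = model(x)  # [2, 10] after mean pooling over tokens + classifier head
    print(tokens.shape, logits.shape)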
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/mobilenetv3.py
""" MobileNet V3 A PyTorch impl of MobileNet-V3, compatible with TF weights from official impl. Paper: Searching for MobileNetV3 - https://arxiv.org/abs/1905.02244 Hacked together by / Copyright 2019, Ross Wightman """ from functools import partial from typing import List import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import SelectAdaptivePool2d, Linear, create_conv2d, get_norm_act_layer from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._efficientnet_blocks import SqueezeExcite from ._efficientnet_builder import EfficientNetBuilder, decode_arch_def, efficientnet_init_weights, \ round_channels, resolve_bn_args, resolve_act_layer, BN_EPS_TF_DEFAULT from ._features import FeatureInfo, FeatureHooks from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['MobileNetV3', 'MobileNetV3Features'] class MobileNetV3(nn.Module): """ MobiletNet-V3 Based on my EfficientNet implementation and building blocks, this model utilizes the MobileNet-v3 specific 'efficient head', where global pooling is done before the head convolution without a final batch-norm layer before the classifier. Paper: `Searching for MobileNetV3` - https://arxiv.org/abs/1905.02244 Other architectures utilizing MobileNet-V3 efficient head that are supported by this impl include: * HardCoRe-NAS - https://arxiv.org/abs/2102.11646 (defn in hardcorenas.py uses this class) * FBNet-V3 - https://arxiv.org/abs/2006.02049 * LCNet - https://arxiv.org/abs/2109.15099 """ def __init__( self, block_args, num_classes=1000, in_chans=3, stem_size=16, fix_stem=False, num_features=1280, head_bias=True, pad_type='', act_layer=None, norm_layer=None, se_layer=None, se_from_exp=True, round_chs_fn=round_channels, drop_rate=0., drop_path_rate=0., global_pool='avg', ): super(MobileNetV3, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d norm_act_layer = get_norm_act_layer(norm_layer, act_layer) se_layer = se_layer or SqueezeExcite self.num_classes = num_classes self.num_features = num_features self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_act_layer(stem_size, inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=32, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = builder.features head_chs = builder.in_chs # Head + Pooling self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) num_pooled_chs = head_chs * self.global_pool.feat_mult() self.conv_head = create_conv2d(num_pooled_chs, self.num_features, 1, padding=pad_type, bias=head_bias) self.act2 = act_layer(inplace=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() efficientnet_init_weights(self) def as_sequential(self): layers = [self.conv_stem, self.bn1] layers.extend(self.blocks) 
layers.extend([self.global_pool, self.conv_head, self.act2]) layers.extend([nn.Flatten(), nn.Dropout(self.drop_rate), self.classifier]) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^conv_stem|bn1', blocks=r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)' ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes # cannot meaningfully change pooling of efficient head after creation self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.conv_head(x) x = self.act2(x) x = self.flatten(x) if pre_logits: return x if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return self.classifier(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x class MobileNetV3Features(nn.Module): """ MobileNetV3 Feature Extractor A work-in-progress feature extraction module for MobileNet-V3 to use as a backbone for segmentation and object detection models. """ def __init__( self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, stem_size=16, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=True, act_layer=None, norm_layer=None, se_layer=None, drop_rate=0., drop_path_rate=0., ): super(MobileNetV3Features, self).__init__() act_layer = act_layer or nn.ReLU norm_layer = norm_layer or nn.BatchNorm2d se_layer = se_layer or SqueezeExcite self.drop_rate = drop_rate self.grad_checkpointing = False # Stem if not fix_stem: stem_size = round_chs_fn(stem_size) self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type) self.bn1 = norm_layer(stem_size) self.act1 = act_layer(inplace=True) # Middle stages (IR/ER/DS Blocks) builder = EfficientNetBuilder( output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, feature_location=feature_location, ) self.blocks = nn.Sequential(*builder(stem_size, block_args)) self.feature_info = FeatureInfo(builder.features, out_indices) self._stage_out_idx = {f['stage']: f['index'] for f in self.feature_info.get_dicts()} efficientnet_init_weights(self) # Register feature extraction hooks with FeatureHooks helper self.feature_hooks = None if feature_location != 'bottleneck': hooks = self.feature_info.get_dicts(keys=('module', 'hook_type')) self.feature_hooks = FeatureHooks(hooks, self.named_modules()) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x) -> List[torch.Tensor]: x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) if self.feature_hooks is None: features = [] if 0 in self._stage_out_idx: features.append(x) # add stem out for i, b in enumerate(self.blocks): if 
self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(b, x) else: x = b(x) if i + 1 in self._stage_out_idx: features.append(x) return features else: self.blocks(x) out = self.feature_hooks.get_output(x.device) return list(out.values()) def _create_mnv3(variant, pretrained=False, **kwargs): features_mode = '' model_cls = MobileNetV3 kwargs_filter = None if kwargs.pop('features_only', False): if 'feature_cfg' in kwargs: features_mode = 'cfg' else: kwargs_filter = ('num_classes', 'num_features', 'head_conv', 'head_bias', 'global_pool') model_cls = MobileNetV3Features features_mode = 'cls' model = build_model_with_cfg( model_cls, variant, pretrained, features_only=features_mode == 'cfg', pretrained_strict=features_mode != 'cls', kwargs_filter=kwargs_filter, **kwargs, ) if features_mode == 'cls': model.default_cfg = pretrained_cfg_for_features(model.default_cfg) return model def _gen_mobilenet_v3_rw(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a MobileNet-V3 model. Ref impl: ? Paper: https://arxiv.org/abs/1905.02244 Args: channel_multiplier: multiplier to number of channels per layer. """ arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16_nre_noskip'], # relu # stage 1, 112x112 in ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu # stage 3, 28x28 in ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish # stage 4, 14x14in ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish # stage 5, 14x14in ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish # stage 6, 7x7 in ['cn_r1_k1_s1_c960'], # hard-swish ] model_kwargs = dict( block_args=decode_arch_def(arch_def), head_bias=False, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid'), **kwargs, ) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_mobilenet_v3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """Creates a MobileNet-V3 model. Ref impl: ? Paper: https://arxiv.org/abs/1905.02244 Args: channel_multiplier: multiplier to number of channels per layer. 
""" if 'small' in variant: num_features = 1024 if 'minimal' in variant: act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s2_e1_c16'], # stage 1, 56x56 in ['ir_r1_k3_s2_e4.5_c24', 'ir_r1_k3_s1_e3.67_c24'], # stage 2, 28x28 in ['ir_r1_k3_s2_e4_c40', 'ir_r2_k3_s1_e6_c40'], # stage 3, 14x14 in ['ir_r2_k3_s1_e3_c48'], # stage 4, 14x14in ['ir_r3_k3_s2_e6_c96'], # stage 6, 7x7 in ['cn_r1_k1_s1_c576'], ] else: act_layer = resolve_act_layer(kwargs, 'hard_swish') arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s2_e1_c16_se0.25_nre'], # relu # stage 1, 56x56 in ['ir_r1_k3_s2_e4.5_c24_nre', 'ir_r1_k3_s1_e3.67_c24_nre'], # relu # stage 2, 28x28 in ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r2_k5_s1_e6_c40_se0.25'], # hard-swish # stage 3, 14x14 in ['ir_r2_k5_s1_e3_c48_se0.25'], # hard-swish # stage 4, 14x14in ['ir_r3_k5_s2_e6_c96_se0.25'], # hard-swish # stage 6, 7x7 in ['cn_r1_k1_s1_c576'], # hard-swish ] else: num_features = 1280 if 'minimal' in variant: act_layer = resolve_act_layer(kwargs, 'relu') arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16'], # stage 1, 112x112 in ['ir_r1_k3_s2_e4_c24', 'ir_r1_k3_s1_e3_c24'], # stage 2, 56x56 in ['ir_r3_k3_s2_e3_c40'], # stage 3, 28x28 in ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # stage 4, 14x14in ['ir_r2_k3_s1_e6_c112'], # stage 5, 14x14in ['ir_r3_k3_s2_e6_c160'], # stage 6, 7x7 in ['cn_r1_k1_s1_c960'], ] else: act_layer = resolve_act_layer(kwargs, 'hard_swish') arch_def = [ # stage 0, 112x112 in ['ds_r1_k3_s1_e1_c16_nre'], # relu # stage 1, 112x112 in ['ir_r1_k3_s2_e4_c24_nre', 'ir_r1_k3_s1_e3_c24_nre'], # relu # stage 2, 56x56 in ['ir_r3_k5_s2_e3_c40_se0.25_nre'], # relu # stage 3, 28x28 in ['ir_r1_k3_s2_e6_c80', 'ir_r1_k3_s1_e2.5_c80', 'ir_r2_k3_s1_e2.3_c80'], # hard-swish # stage 4, 14x14in ['ir_r2_k3_s1_e6_c112_se0.25'], # hard-swish # stage 5, 14x14in ['ir_r3_k5_s2_e6_c160_se0.25'], # hard-swish # stage 6, 7x7 in ['cn_r1_k1_s1_c960'], # hard-swish ] se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels) model_kwargs = dict( block_args=decode_arch_def(arch_def), num_features=num_features, stem_size=16, fix_stem=channel_multiplier < 0.75, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, se_layer=se_layer, **kwargs, ) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_fbnetv3(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """ FBNetV3 Paper: `FBNetV3: Joint Architecture-Recipe Search using Predictor Pretraining` - https://arxiv.org/abs/2006.02049 FIXME untested, this is a preliminary impl of some FBNet-V3 variants. 
""" vl = variant.split('_')[-1] if vl in ('a', 'b'): stem_size = 16 arch_def = [ ['ds_r2_k3_s1_e1_c16'], ['ir_r1_k5_s2_e4_c24', 'ir_r3_k5_s1_e2_c24'], ['ir_r1_k5_s2_e5_c40_se0.25', 'ir_r4_k5_s1_e3_c40_se0.25'], ['ir_r1_k5_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], ['ir_r1_k3_s1_e5_c120_se0.25', 'ir_r5_k5_s1_e3_c120_se0.25'], ['ir_r1_k3_s2_e6_c184_se0.25', 'ir_r5_k5_s1_e4_c184_se0.25', 'ir_r1_k5_s1_e6_c224_se0.25'], ['cn_r1_k1_s1_c1344'], ] elif vl == 'd': stem_size = 24 arch_def = [ ['ds_r2_k3_s1_e1_c16'], ['ir_r1_k3_s2_e5_c24', 'ir_r5_k3_s1_e2_c24'], ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r4_k3_s1_e3_c40_se0.25'], ['ir_r1_k3_s2_e5_c72', 'ir_r4_k3_s1_e3_c72'], ['ir_r1_k3_s1_e5_c128_se0.25', 'ir_r6_k5_s1_e3_c128_se0.25'], ['ir_r1_k3_s2_e6_c208_se0.25', 'ir_r5_k5_s1_e5_c208_se0.25', 'ir_r1_k5_s1_e6_c240_se0.25'], ['cn_r1_k1_s1_c1440'], ] elif vl == 'g': stem_size = 32 arch_def = [ ['ds_r3_k3_s1_e1_c24'], ['ir_r1_k5_s2_e4_c40', 'ir_r4_k5_s1_e2_c40'], ['ir_r1_k5_s2_e4_c56_se0.25', 'ir_r4_k5_s1_e3_c56_se0.25'], ['ir_r1_k5_s2_e5_c104', 'ir_r4_k3_s1_e3_c104'], ['ir_r1_k3_s1_e5_c160_se0.25', 'ir_r8_k5_s1_e3_c160_se0.25'], ['ir_r1_k3_s2_e6_c264_se0.25', 'ir_r6_k5_s1_e5_c264_se0.25', 'ir_r2_k5_s1_e6_c288_se0.25'], ['cn_r1_k1_s1_c1728'], ] else: raise NotImplemented round_chs_fn = partial(round_channels, multiplier=channel_multiplier, round_limit=0.95) se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=round_chs_fn) act_layer = resolve_act_layer(kwargs, 'hard_swish') model_kwargs = dict( block_args=decode_arch_def(arch_def), num_features=1984, head_bias=False, stem_size=stem_size, round_chs_fn=round_chs_fn, se_from_exp=False, norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=act_layer, se_layer=se_layer, **kwargs, ) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_lcnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """ LCNet Essentially a MobileNet-V3 crossed with a MobileNet-V1 Paper: `PP-LCNet: A Lightweight CPU Convolutional Neural Network` - https://arxiv.org/abs/2109.15099 Args: channel_multiplier: multiplier to number of channels per layer. """ arch_def = [ # stage 0, 112x112 in ['dsa_r1_k3_s1_c32'], # stage 1, 112x112 in ['dsa_r2_k3_s2_c64'], # stage 2, 56x56 in ['dsa_r2_k3_s2_c128'], # stage 3, 28x28 in ['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'], # stage 4, 14x14in ['dsa_r4_k5_s1_c256'], # stage 5, 14x14in ['dsa_r2_k5_s2_c512_se0.25'], # 7x7 ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=16, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU), num_features=1280, **kwargs, ) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _gen_lcnet(variant, channel_multiplier=1.0, pretrained=False, **kwargs): """ LCNet Essentially a MobileNet-V3 crossed with a MobileNet-V1 Paper: `PP-LCNet: A Lightweight CPU Convolutional Neural Network` - https://arxiv.org/abs/2109.15099 Args: channel_multiplier: multiplier to number of channels per layer. 
""" arch_def = [ # stage 0, 112x112 in ['dsa_r1_k3_s1_c32'], # stage 1, 112x112 in ['dsa_r2_k3_s2_c64'], # stage 2, 56x56 in ['dsa_r2_k3_s2_c128'], # stage 3, 28x28 in ['dsa_r1_k3_s2_c256', 'dsa_r1_k5_s1_c256'], # stage 4, 14x14in ['dsa_r4_k5_s1_c256'], # stage 5, 14x14in ['dsa_r2_k5_s2_c512_se0.25'], # 7x7 ] model_kwargs = dict( block_args=decode_arch_def(arch_def), stem_size=16, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)), act_layer=resolve_act_layer(kwargs, 'hard_swish'), se_layer=partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU), num_features=1280, **kwargs, ) model = _create_mnv3(variant, pretrained, **model_kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'mobilenetv3_large_075.untrained': _cfg(url=''), 'mobilenetv3_large_100.ra_in1k': _cfg( interpolation='bicubic', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth', hf_hub_id='timm/'), 'mobilenetv3_large_100.miil_in21k_ft_in1k': _cfg( interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), origin_url='https://github.com/Alibaba-MIIL/ImageNet21K', paper_ids='arXiv:2104.10972v4', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_1k_miil_78_0-66471c13.pth', hf_hub_id='timm/'), 'mobilenetv3_large_100.miil_in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/mobilenetv3_large_100_in21k_miil-d71cc17b.pth', hf_hub_id='timm/', origin_url='https://github.com/Alibaba-MIIL/ImageNet21K', paper_ids='arXiv:2104.10972v4', interpolation='bilinear', mean=(0., 0., 0.), std=(1., 1., 1.), num_classes=11221), 'mobilenetv3_small_050.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv3_small_075.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv3_small_100.lamb_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_100_lamb-266a294c.pth', hf_hub_id='timm/', interpolation='bicubic'), 'mobilenetv3_rw.rmsp_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth', interpolation='bicubic'), 'tf_mobilenetv3_large_075.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_large_100.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_large_minimal_100.in1k': _cfg( 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_small_075.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_small_100.in1k': _cfg( url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'tf_mobilenetv3_small_minimal_100.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth', hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD), 'fbnetv3_b.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_b_224-ead5d2a1.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), crop_pct=0.95), 'fbnetv3_d.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_d_224-c98bce42.pth', hf_hub_id='timm/', test_input_size=(3, 256, 256), crop_pct=0.95), 'fbnetv3_g.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetv3_g_240-0b1df83b.pth', hf_hub_id='timm/', input_size=(3, 240, 240), test_input_size=(3, 288, 288), crop_pct=0.95, pool_size=(8, 8)), "lcnet_035.untrained": _cfg(), "lcnet_050.ra2_in1k": _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_050-f447553b.pth', hf_hub_id='timm/', interpolation='bicubic', ), "lcnet_075.ra2_in1k": _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_075-318cad2c.pth', hf_hub_id='timm/', interpolation='bicubic', ), "lcnet_100.ra2_in1k": _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/lcnet_100-a929038c.pth', hf_hub_id='timm/', interpolation='bicubic', ), "lcnet_150.untrained": _cfg(), }) @register_model def mobilenetv3_large_075(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_large_100(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_small_050(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_small_050', 0.50, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_small_075(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_small_100(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ model = _gen_mobilenet_v3('mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def mobilenetv3_rw(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ if pretrained: # pretrained model trained with non-default BN epsilon kwargs['bn_eps'] = BN_EPS_TF_DEFAULT model = _gen_mobilenet_v3_rw('mobilenetv3_rw', 
1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_large_075(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_large_100(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_large_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_large_minimal_100(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_large_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_small_075(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_small_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_small_100(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_small_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def tf_mobilenetv3_small_minimal_100(pretrained=False, **kwargs) -> MobileNetV3: """ MobileNet V3 """ kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_mobilenet_v3('tf_mobilenetv3_small_minimal_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def fbnetv3_b(pretrained=False, **kwargs) -> MobileNetV3: """ FBNetV3-B """ model = _gen_fbnetv3('fbnetv3_b', pretrained=pretrained, **kwargs) return model @register_model def fbnetv3_d(pretrained=False, **kwargs) -> MobileNetV3: """ FBNetV3-D """ model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs) return model @register_model def fbnetv3_g(pretrained=False, **kwargs) -> MobileNetV3: """ FBNetV3-G """ model = _gen_fbnetv3('fbnetv3_g', pretrained=pretrained, **kwargs) return model @register_model def lcnet_035(pretrained=False, **kwargs) -> MobileNetV3: """ PP-LCNet 0.35""" model = _gen_lcnet('lcnet_035', 0.35, pretrained=pretrained, **kwargs) return model @register_model def lcnet_050(pretrained=False, **kwargs) -> MobileNetV3: """ PP-LCNet 0.5""" model = _gen_lcnet('lcnet_050', 0.5, pretrained=pretrained, **kwargs) return model @register_model def lcnet_075(pretrained=False, **kwargs) -> MobileNetV3: """ PP-LCNet 1.0""" model = _gen_lcnet('lcnet_075', 0.75, pretrained=pretrained, **kwargs) return model @register_model def lcnet_100(pretrained=False, **kwargs) -> MobileNetV3: """ PP-LCNet 1.0""" model = _gen_lcnet('lcnet_100', 1.0, pretrained=pretrained, **kwargs) return model @register_model def lcnet_150(pretrained=False, **kwargs) -> MobileNetV3: """ PP-LCNet 1.5""" model = _gen_lcnet('lcnet_150', 1.5, pretrained=pretrained, **kwargs) return model register_model_deprecations(__name__, { 'mobilenetv3_large_100_miil': 'mobilenetv3_large_100.miil_in21k_ft_in1k', 'mobilenetv3_large_100_miil_in21k': 'mobilenetv3_large_100.miil_in21k', })
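

if __name__ == '__main__':
    # Illustrative usage sketch appended for clarity; it is not part of the original
    # module. Run via `python -m timm.models.mobilenetv3` so the relative imports above
    # resolve. It exercises two paths defined in this file: the standard classification
    # model (MobileNetV3) and the features_only backbone (MobileNetV3Features) that
    # _create_mnv3 selects when `features_only=True` is passed.
    x = torch.randn(1, 3, 224, 224)
    cls_model = mobilenetv3_small_100()
    print('logits:', tuple(cls_model(x).shape))  # (1, 1000)
    feat_model = mobilenetv3_small_100(features_only=True)
    for i, f in enumerate(feat_model(x)):
        print(f'feature {i}:', tuple(f.shape))  # multi-scale feature maps for detection/segmentation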
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/mobilevit.py
""" MobileViT Paper: V1: `MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer` - https://arxiv.org/abs/2110.02178 V2: `Separable Self-attention for Mobile Vision Transformers` - https://arxiv.org/abs/2206.02680 MobileVitBlock and checkpoints adapted from https://github.com/apple/ml-cvnets (original copyright below) License: https://github.com/apple/ml-cvnets/blob/main/LICENSE (Apple open source) Rest of code, ByobNet, and Transformer block hacked together by / Copyright 2022, Ross Wightman """ # # For licensing see accompanying LICENSE file. # Copyright (C) 2020 Apple Inc. All Rights Reserved. # import math from typing import Callable, Tuple, Optional import torch import torch.nn.functional as F from torch import nn from timm.layers import to_2tuple, make_divisible, GroupNorm1, ConvMlp, DropPath, is_exportable from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs, register_model_deprecations from .byobnet import register_block, ByoBlockCfg, ByoModelCfg, ByobNet, LayerFn, num_groups from .vision_transformer import Block as TransformerBlock __all__ = [] def _inverted_residual_block(d, c, s, br=4.0): # inverted residual is a bottleneck block with bottle_ratio > 1 applied to in_chs, linear output, gs=1 (depthwise) return ByoBlockCfg( type='bottle', d=d, c=c, s=s, gs=1, br=br, block_kwargs=dict(bottle_in=True, linear_out=True)) def _mobilevit_block(d, c, s, transformer_dim, transformer_depth, patch_size=4, br=4.0): # inverted residual + mobilevit blocks as per MobileViT network return ( _inverted_residual_block(d=d, c=c, s=s, br=br), ByoBlockCfg( type='mobilevit', d=1, c=c, s=1, block_kwargs=dict( transformer_dim=transformer_dim, transformer_depth=transformer_depth, patch_size=patch_size) ) ) def _mobilevitv2_block(d, c, s, transformer_depth, patch_size=2, br=2.0, transformer_br=0.5): # inverted residual + mobilevit blocks as per MobileViT network return ( _inverted_residual_block(d=d, c=c, s=s, br=br), ByoBlockCfg( type='mobilevit2', d=1, c=c, s=1, br=transformer_br, gs=1, block_kwargs=dict( transformer_depth=transformer_depth, patch_size=patch_size) ) ) def _mobilevitv2_cfg(multiplier=1.0): chs = (64, 128, 256, 384, 512) if multiplier != 1.0: chs = tuple([int(c * multiplier) for c in chs]) cfg = ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=chs[0], s=1, br=2.0), _inverted_residual_block(d=2, c=chs[1], s=2, br=2.0), _mobilevitv2_block(d=1, c=chs[2], s=2, transformer_depth=2), _mobilevitv2_block(d=1, c=chs[3], s=2, transformer_depth=4), _mobilevitv2_block(d=1, c=chs[4], s=2, transformer_depth=3), ), stem_chs=int(32 * multiplier), stem_type='3x3', stem_pool='', downsample='', act_layer='silu', ) return cfg model_cfgs = dict( mobilevit_xxs=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=16, s=1, br=2.0), _inverted_residual_block(d=3, c=24, s=2, br=2.0), _mobilevit_block(d=1, c=48, s=2, transformer_dim=64, transformer_depth=2, patch_size=2, br=2.0), _mobilevit_block(d=1, c=64, s=2, transformer_dim=80, transformer_depth=4, patch_size=2, br=2.0), _mobilevit_block(d=1, c=80, s=2, transformer_dim=96, transformer_depth=3, patch_size=2, br=2.0), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=320, ), mobilevit_xs=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=48, s=2), _mobilevit_block(d=1, c=64, s=2, transformer_dim=96, transformer_depth=2, patch_size=2), 
_mobilevit_block(d=1, c=80, s=2, transformer_dim=120, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=3, patch_size=2), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=384, ), mobilevit_s=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=64, s=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=640, ), semobilevit_s=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=64, s=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', attn_layer='se', attn_kwargs=dict(rd_ratio=1/8), num_features=640, ), mobilevitv2_050=_mobilevitv2_cfg(.50), mobilevitv2_075=_mobilevitv2_cfg(.75), mobilevitv2_125=_mobilevitv2_cfg(1.25), mobilevitv2_100=_mobilevitv2_cfg(1.0), mobilevitv2_150=_mobilevitv2_cfg(1.5), mobilevitv2_175=_mobilevitv2_cfg(1.75), mobilevitv2_200=_mobilevitv2_cfg(2.0), ) @register_notrace_module class MobileVitBlock(nn.Module): """ MobileViT block Paper: https://arxiv.org/abs/2110.02178?context=cs.LG """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 3, stride: int = 1, bottle_ratio: float = 1.0, group_size: Optional[int] = None, dilation: Tuple[int, int] = (1, 1), mlp_ratio: float = 2.0, transformer_dim: Optional[int] = None, transformer_depth: int = 2, patch_size: int = 8, num_heads: int = 4, attn_drop: float = 0., drop: int = 0., no_fusion: bool = False, drop_path_rate: float = 0., layers: LayerFn = None, transformer_norm_layer: Callable = nn.LayerNorm, **kwargs, # eat unused args ): super(MobileVitBlock, self).__init__() layers = layers or LayerFn() groups = num_groups(group_size, in_chs) out_chs = out_chs or in_chs transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) self.conv_kxk = layers.conv_norm_act( in_chs, in_chs, kernel_size=kernel_size, stride=stride, groups=groups, dilation=dilation[0]) self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) self.transformer = nn.Sequential(*[ TransformerBlock( transformer_dim, mlp_ratio=mlp_ratio, num_heads=num_heads, qkv_bias=True, attn_drop=attn_drop, proj_drop=drop, drop_path=drop_path_rate, act_layer=layers.act, norm_layer=transformer_norm_layer, ) for _ in range(transformer_depth) ]) self.norm = transformer_norm_layer(transformer_dim) self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1) if no_fusion: self.conv_fusion = None else: self.conv_fusion = layers.conv_norm_act(in_chs + out_chs, out_chs, kernel_size=kernel_size, stride=1) self.patch_size = to_2tuple(patch_size) self.patch_area = self.patch_size[0] * self.patch_size[1] def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x # Local representation x = self.conv_kxk(x) x = self.conv_1x1(x) # Unfold (feature map -> patches) patch_h, patch_w = self.patch_size B, C, H, W = x.shape 
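        # If H or W is not an even multiple of the patch size, the feature map is
        # bilinearly resized up to the next multiple so it can be partitioned into
        # whole patch_h x patch_w patches; the original (H, W) is restored after folding.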
new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w num_patches = num_patch_h * num_patch_w # N interpolate = False if new_h != H or new_w != W: # Note: Padding can be done, but then it needs to be handled in attention function. x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False) interpolate = True # [B, C, H, W] --> [B * C * n_h, n_w, p_h, p_w] x = x.reshape(B * C * num_patch_h, patch_h, num_patch_w, patch_w).transpose(1, 2) # [B * C * n_h, n_w, p_h, p_w] --> [BP, N, C] where P = p_h * p_w and N = n_h * n_w x = x.reshape(B, C, num_patches, self.patch_area).transpose(1, 3).reshape(B * self.patch_area, num_patches, -1) # Global representations x = self.transformer(x) x = self.norm(x) # Fold (patch -> feature map) # [B, P, N, C] --> [B*C*n_h, n_w, p_h, p_w] x = x.contiguous().view(B, self.patch_area, num_patches, -1) x = x.transpose(1, 3).reshape(B * C * num_patch_h, num_patch_w, patch_h, patch_w) # [B*C*n_h, n_w, p_h, p_w] --> [B*C*n_h, p_h, n_w, p_w] --> [B, C, H, W] x = x.transpose(1, 2).reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) if interpolate: x = F.interpolate(x, size=(H, W), mode="bilinear", align_corners=False) x = self.conv_proj(x) if self.conv_fusion is not None: x = self.conv_fusion(torch.cat((shortcut, x), dim=1)) return x class LinearSelfAttention(nn.Module): """ This layer applies a self-attention with linear complexity, as described in `https://arxiv.org/abs/2206.02680` This layer can be used for self- as well as cross-attention. Args: embed_dim (int): :math:`C` from an expected input of size :math:`(N, C, H, W)` attn_drop (float): Dropout value for context scores. Default: 0.0 bias (bool): Use bias in learnable layers. Default: True Shape: - Input: :math:`(N, C, P, N)` where :math:`N` is the batch size, :math:`C` is the input channels, :math:`P` is the number of pixels in the patch, and :math:`N` is the number of patches - Output: same as the input .. note:: For MobileViTv2, we unfold the feature map [B, C, H, W] into [B, C, P, N] where P is the number of pixels in a patch and N is the number of patches. Because channel is the first dimension in this unfolded tensor, we use point-wise convolution (instead of a linear layer). This avoids a transpose operation (which may be expensive on resource-constrained devices) that may be required to convert the unfolded tensor from channel-first to channel-last format in case of a linear layer. 
""" def __init__( self, embed_dim: int, attn_drop: float = 0.0, proj_drop: float = 0.0, bias: bool = True, ) -> None: super().__init__() self.embed_dim = embed_dim self.qkv_proj = nn.Conv2d( in_channels=embed_dim, out_channels=1 + (2 * embed_dim), bias=bias, kernel_size=1, ) self.attn_drop = nn.Dropout(attn_drop) self.out_proj = nn.Conv2d( in_channels=embed_dim, out_channels=embed_dim, bias=bias, kernel_size=1, ) self.out_drop = nn.Dropout(proj_drop) def _forward_self_attn(self, x: torch.Tensor) -> torch.Tensor: # [B, C, P, N] --> [B, h + 2d, P, N] qkv = self.qkv_proj(x) # Project x into query, key and value # Query --> [B, 1, P, N] # value, key --> [B, d, P, N] query, key, value = qkv.split([1, self.embed_dim, self.embed_dim], dim=1) # apply softmax along N dimension context_scores = F.softmax(query, dim=-1) context_scores = self.attn_drop(context_scores) # Compute context vector # [B, d, P, N] x [B, 1, P, N] -> [B, d, P, N] --> [B, d, P, 1] context_vector = (key * context_scores).sum(dim=-1, keepdim=True) # combine context vector with values # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] out = F.relu(value) * context_vector.expand_as(value) out = self.out_proj(out) out = self.out_drop(out) return out @torch.jit.ignore() def _forward_cross_attn(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: # x --> [B, C, P, N] # x_prev = [B, C, P, M] batch_size, in_dim, kv_patch_area, kv_num_patches = x.shape q_patch_area, q_num_patches = x.shape[-2:] assert ( kv_patch_area == q_patch_area ), "The number of pixels in a patch for query and key_value should be the same" # compute query, key, and value # [B, C, P, M] --> [B, 1 + d, P, M] qk = F.conv2d( x_prev, weight=self.qkv_proj.weight[:self.embed_dim + 1], bias=self.qkv_proj.bias[:self.embed_dim + 1], ) # [B, 1 + d, P, M] --> [B, 1, P, M], [B, d, P, M] query, key = qk.split([1, self.embed_dim], dim=1) # [B, C, P, N] --> [B, d, P, N] value = F.conv2d( x, weight=self.qkv_proj.weight[self.embed_dim + 1], bias=self.qkv_proj.bias[self.embed_dim + 1] if self.qkv_proj.bias is not None else None, ) # apply softmax along M dimension context_scores = F.softmax(query, dim=-1) context_scores = self.attn_drop(context_scores) # compute context vector # [B, d, P, M] * [B, 1, P, M] -> [B, d, P, M] --> [B, d, P, 1] context_vector = (key * context_scores).sum(dim=-1, keepdim=True) # combine context vector with values # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] out = F.relu(value) * context_vector.expand_as(value) out = self.out_proj(out) out = self.out_drop(out) return out def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: if x_prev is None: return self._forward_self_attn(x) else: return self._forward_cross_attn(x, x_prev=x_prev) class LinearTransformerBlock(nn.Module): """ This class defines the pre-norm transformer encoder with linear self-attention in `MobileViTv2 paper <>`_ Args: embed_dim (int): :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, P, N)` mlp_ratio (float): Inner dimension ratio of the FFN relative to embed_dim drop (float): Dropout rate. Default: 0.0 attn_drop (float): Dropout rate for attention in multi-head attention. Default: 0.0 drop_path (float): Stochastic depth rate Default: 0.0 norm_layer (Callable): Normalization layer. 
Default: layer_norm_2d Shape: - Input: :math:`(B, C_{in}, P, N)` where :math:`B` is batch size, :math:`C_{in}` is input embedding dim, :math:`P` is number of pixels in a patch, and :math:`N` is number of patches, - Output: same shape as the input """ def __init__( self, embed_dim: int, mlp_ratio: float = 2.0, drop: float = 0.0, attn_drop: float = 0.0, drop_path: float = 0.0, act_layer=None, norm_layer=None, ) -> None: super().__init__() act_layer = act_layer or nn.SiLU norm_layer = norm_layer or GroupNorm1 self.norm1 = norm_layer(embed_dim) self.attn = LinearSelfAttention(embed_dim=embed_dim, attn_drop=attn_drop, proj_drop=drop) self.drop_path1 = DropPath(drop_path) self.norm2 = norm_layer(embed_dim) self.mlp = ConvMlp( in_features=embed_dim, hidden_features=int(embed_dim * mlp_ratio), act_layer=act_layer, drop=drop) self.drop_path2 = DropPath(drop_path) def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: if x_prev is None: # self-attention x = x + self.drop_path1(self.attn(self.norm1(x))) else: # cross-attention res = x x = self.norm1(x) # norm x = self.attn(x, x_prev) # attn x = self.drop_path1(x) + res # residual # Feed forward network x = x + self.drop_path2(self.mlp(self.norm2(x))) return x @register_notrace_module class MobileVitV2Block(nn.Module): """ This class defines the `MobileViTv2 block <>`_ """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 3, bottle_ratio: float = 1.0, group_size: Optional[int] = 1, dilation: Tuple[int, int] = (1, 1), mlp_ratio: float = 2.0, transformer_dim: Optional[int] = None, transformer_depth: int = 2, patch_size: int = 8, attn_drop: float = 0., drop: int = 0., drop_path_rate: float = 0., layers: LayerFn = None, transformer_norm_layer: Callable = GroupNorm1, **kwargs, # eat unused args ): super(MobileVitV2Block, self).__init__() layers = layers or LayerFn() groups = num_groups(group_size, in_chs) out_chs = out_chs or in_chs transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) self.conv_kxk = layers.conv_norm_act( in_chs, in_chs, kernel_size=kernel_size, stride=1, groups=groups, dilation=dilation[0]) self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) self.transformer = nn.Sequential(*[ LinearTransformerBlock( transformer_dim, mlp_ratio=mlp_ratio, attn_drop=attn_drop, drop=drop, drop_path=drop_path_rate, act_layer=layers.act, norm_layer=transformer_norm_layer ) for _ in range(transformer_depth) ]) self.norm = transformer_norm_layer(transformer_dim) self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1, apply_act=False) self.patch_size = to_2tuple(patch_size) self.patch_area = self.patch_size[0] * self.patch_size[1] self.coreml_exportable = is_exportable() def forward(self, x: torch.Tensor) -> torch.Tensor: B, C, H, W = x.shape patch_h, patch_w = self.patch_size new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w num_patches = num_patch_h * num_patch_w # N if new_h != H or new_w != W: x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=True) # Local representation x = self.conv_kxk(x) x = self.conv_1x1(x) # Unfold (feature map -> patches), [B, C, H, W] -> [B, C, P, N] C = x.shape[1] if self.coreml_exportable: x = F.unfold(x, kernel_size=(patch_h, patch_w), stride=(patch_h, patch_w)) else: x = x.reshape(B, C, num_patch_h, patch_h, num_patch_w, patch_w).permute(0, 1, 3, 5, 2, 4) x 
= x.reshape(B, C, -1, num_patches) # Global representations x = self.transformer(x) x = self.norm(x) # Fold (patches -> feature map), [B, C, P, N] --> [B, C, H, W] if self.coreml_exportable: # adopted from https://github.com/apple/ml-cvnets/blob/main/cvnets/modules/mobilevit_block.py#L609-L624 x = x.reshape(B, C * patch_h * patch_w, num_patch_h, num_patch_w) x = F.pixel_shuffle(x, upscale_factor=patch_h) else: x = x.reshape(B, C, patch_h, patch_w, num_patch_h, num_patch_w).permute(0, 1, 4, 2, 5, 3) x = x.reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) x = self.conv_proj(x) return x register_block('mobilevit', MobileVitBlock) register_block('mobilevit2', MobileVitV2Block) def _create_mobilevit(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _create_mobilevit2(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': (0., 0., 0.), 'std': (1., 1., 1.), 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'fixed_input_size': False, **kwargs } default_cfgs = generate_default_cfgs({ 'mobilevit_xxs.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevit_xs.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevit_s.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevitv2_050.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_075.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_100.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_125.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_175.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_200.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in22k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_175.cvnets_in22k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_200.cvnets_in22k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'mobilevitv2_175.cvnets_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'mobilevitv2_200.cvnets_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), }) @register_model def mobilevit_xxs(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_xxs', pretrained=pretrained, **kwargs) @register_model def mobilevit_xs(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_xs', pretrained=pretrained, **kwargs) @register_model def mobilevit_s(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_s', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_050(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_050', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_075(pretrained=False, **kwargs) -> ByobNet: return 
_create_mobilevit('mobilevitv2_075', pretrained=pretrained, **kwargs)


@register_model
def mobilevitv2_100(pretrained=False, **kwargs) -> ByobNet:
    return _create_mobilevit('mobilevitv2_100', pretrained=pretrained, **kwargs)


@register_model
def mobilevitv2_125(pretrained=False, **kwargs) -> ByobNet:
    return _create_mobilevit('mobilevitv2_125', pretrained=pretrained, **kwargs)


@register_model
def mobilevitv2_150(pretrained=False, **kwargs) -> ByobNet:
    return _create_mobilevit('mobilevitv2_150', pretrained=pretrained, **kwargs)


@register_model
def mobilevitv2_175(pretrained=False, **kwargs) -> ByobNet:
    return _create_mobilevit('mobilevitv2_175', pretrained=pretrained, **kwargs)


@register_model
def mobilevitv2_200(pretrained=False, **kwargs) -> ByobNet:
    return _create_mobilevit('mobilevitv2_200', pretrained=pretrained, **kwargs)


register_model_deprecations(__name__, {
    'mobilevitv2_150_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k',
    'mobilevitv2_175_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k',
    'mobilevitv2_200_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k',
    'mobilevitv2_150_384_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k_384',
    'mobilevitv2_175_384_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k_384',
    'mobilevitv2_200_384_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k_384',
})
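# Illustrative usage sketch (an added example, assuming the `timm` package is installed,
# not canonical usage from this file): build one of the MobileViT models registered above
# via timm's create_model factory and run a dummy forward pass at the 256x256 input size
# used by the default_cfgs above.
if __name__ == '__main__':
    import torch
    import timm

    _model = timm.create_model('mobilevitv2_100', pretrained=False)
    _model.eval()
    with torch.no_grad():
        _logits = _model(torch.randn(1, 3, 256, 256))
    print(_logits.shape)  # expected: torch.Size([1, 1000])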
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/mvitv2.py
""" Multi-Scale Vision Transformer v2 @inproceedings{li2021improved, title={MViTv2: Improved multiscale vision transformers for classification and detection}, author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph}, booktitle={CVPR}, year={2022} } Code adapted from original Apache 2.0 licensed impl at https://github.com/facebookresearch/mvit Original copyright below. Modifications and timm support by / Copyright 2022, Ross Wightman """ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved. All Rights Reserved. import operator from collections import OrderedDict from dataclasses import dataclass from functools import partial, reduce from typing import Union, List, Tuple, Optional import torch import torch.utils.checkpoint as checkpoint from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, trunc_normal_tf_, get_norm_layer, to_2tuple from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._registry import register_model, register_model_deprecations, generate_default_cfgs __all__ = ['MultiScaleVit', 'MultiScaleVitCfg'] # model_registry will add each entrypoint fn to this @dataclass class MultiScaleVitCfg: depths: Tuple[int, ...] = (2, 3, 16, 3) embed_dim: Union[int, Tuple[int, ...]] = 96 num_heads: Union[int, Tuple[int, ...]] = 1 mlp_ratio: float = 4. pool_first: bool = False expand_attn: bool = True qkv_bias: bool = True use_cls_token: bool = False use_abs_pos: bool = False residual_pooling: bool = True mode: str = 'conv' kernel_qkv: Tuple[int, int] = (3, 3) stride_q: Optional[Tuple[Tuple[int, int]]] = ((1, 1), (2, 2), (2, 2), (2, 2)) stride_kv: Optional[Tuple[Tuple[int, int]]] = None stride_kv_adaptive: Optional[Tuple[int, int]] = (4, 4) patch_kernel: Tuple[int, int] = (7, 7) patch_stride: Tuple[int, int] = (4, 4) patch_padding: Tuple[int, int] = (3, 3) pool_type: str = 'max' rel_pos_type: str = 'spatial' act_layer: Union[str, Tuple[str, str]] = 'gelu' norm_layer: Union[str, Tuple[str, str]] = 'layernorm' norm_eps: float = 1e-6 def __post_init__(self): num_stages = len(self.depths) if not isinstance(self.embed_dim, (tuple, list)): self.embed_dim = tuple(self.embed_dim * 2 ** i for i in range(num_stages)) assert len(self.embed_dim) == num_stages if not isinstance(self.num_heads, (tuple, list)): self.num_heads = tuple(self.num_heads * 2 ** i for i in range(num_stages)) assert len(self.num_heads) == num_stages if self.stride_kv_adaptive is not None and self.stride_kv is None: _stride_kv = self.stride_kv_adaptive pool_kv_stride = [] for i in range(num_stages): if min(self.stride_q[i]) > 1: _stride_kv = [ max(_stride_kv[d] // self.stride_q[i][d], 1) for d in range(len(_stride_kv)) ] pool_kv_stride.append(tuple(_stride_kv)) self.stride_kv = tuple(pool_kv_stride) def prod(iterable): return reduce(operator.mul, iterable, 1) class PatchEmbed(nn.Module): """ PatchEmbed. 
""" def __init__( self, dim_in=3, dim_out=768, kernel=(7, 7), stride=(4, 4), padding=(3, 3), ): super().__init__() self.proj = nn.Conv2d( dim_in, dim_out, kernel_size=kernel, stride=stride, padding=padding, ) def forward(self, x) -> Tuple[torch.Tensor, List[int]]: x = self.proj(x) # B C H W -> B HW C return x.flatten(2).transpose(1, 2), x.shape[-2:] @register_notrace_function def reshape_pre_pool( x, feat_size: List[int], has_cls_token: bool = True ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: H, W = feat_size if has_cls_token: cls_tok, x = x[:, :, :1, :], x[:, :, 1:, :] else: cls_tok = None x = x.reshape(-1, H, W, x.shape[-1]).permute(0, 3, 1, 2).contiguous() return x, cls_tok @register_notrace_function def reshape_post_pool( x, num_heads: int, cls_tok: Optional[torch.Tensor] = None ) -> Tuple[torch.Tensor, List[int]]: feat_size = [x.shape[2], x.shape[3]] L_pooled = x.shape[2] * x.shape[3] x = x.reshape(-1, num_heads, x.shape[1], L_pooled).transpose(2, 3) if cls_tok is not None: x = torch.cat((cls_tok, x), dim=2) return x, feat_size @register_notrace_function def cal_rel_pos_type( attn: torch.Tensor, q: torch.Tensor, has_cls_token: bool, q_size: List[int], k_size: List[int], rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, ): """ Spatial Relative Positional Embeddings. """ sp_idx = 1 if has_cls_token else 0 q_h, q_w = q_size k_h, k_w = k_size # Scale up rel pos if shapes for q and k are different. q_h_ratio = max(k_h / q_h, 1.0) k_h_ratio = max(q_h / k_h, 1.0) dist_h = ( torch.arange(q_h, device=q.device).unsqueeze(-1) * q_h_ratio - torch.arange(k_h, device=q.device).unsqueeze(0) * k_h_ratio ) dist_h += (k_h - 1) * k_h_ratio q_w_ratio = max(k_w / q_w, 1.0) k_w_ratio = max(q_w / k_w, 1.0) dist_w = ( torch.arange(q_w, device=q.device).unsqueeze(-1) * q_w_ratio - torch.arange(k_w, device=q.device).unsqueeze(0) * k_w_ratio ) dist_w += (k_w - 1) * k_w_ratio rel_h = rel_pos_h[dist_h.long()] rel_w = rel_pos_w[dist_w.long()] B, n_head, q_N, dim = q.shape r_q = q[:, :, sp_idx:].reshape(B, n_head, q_h, q_w, dim) rel_h = torch.einsum("byhwc,hkc->byhwk", r_q, rel_h) rel_w = torch.einsum("byhwc,wkc->byhwk", r_q, rel_w) attn[:, :, sp_idx:, sp_idx:] = ( attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_h, q_w, k_h, k_w) + rel_h.unsqueeze(-1) + rel_w.unsqueeze(-2) ).view(B, -1, q_h * q_w, k_h * k_w) return attn class MultiScaleAttentionPoolFirst(nn.Module): def __init__( self, dim, dim_out, feat_size, num_heads=8, qkv_bias=True, mode="conv", kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), has_cls_token=True, rel_pos_type='spatial', residual_pooling=True, norm_layer=nn.LayerNorm, ): super().__init__() self.num_heads = num_heads self.dim_out = dim_out self.head_dim = dim_out // num_heads self.scale = self.head_dim ** -0.5 self.has_cls_token = has_cls_token padding_q = tuple([int(q // 2) for q in kernel_q]) padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) self.q = nn.Linear(dim, dim_out, bias=qkv_bias) self.k = nn.Linear(dim, dim_out, bias=qkv_bias) self.v = nn.Linear(dim, dim_out, bias=qkv_bias) self.proj = nn.Linear(dim_out, dim_out) # Skip pooling with kernel and stride size of (1, 1, 1). 
if prod(kernel_q) == 1 and prod(stride_q) == 1: kernel_q = None if prod(kernel_kv) == 1 and prod(stride_kv) == 1: kernel_kv = None self.mode = mode self.unshared = mode == 'conv_unshared' self.pool_q, self.pool_k, self.pool_v = None, None, None self.norm_q, self.norm_k, self.norm_v = None, None, None if mode in ("avg", "max"): pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d if kernel_q: self.pool_q = pool_op(kernel_q, stride_q, padding_q) if kernel_kv: self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) elif mode == "conv" or mode == "conv_unshared": dim_conv = dim // num_heads if mode == "conv" else dim if kernel_q: self.pool_q = nn.Conv2d( dim_conv, dim_conv, kernel_q, stride=stride_q, padding=padding_q, groups=dim_conv, bias=False, ) self.norm_q = norm_layer(dim_conv) if kernel_kv: self.pool_k = nn.Conv2d( dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False, ) self.norm_k = norm_layer(dim_conv) self.pool_v = nn.Conv2d( dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False, ) self.norm_v = norm_layer(dim_conv) else: raise NotImplementedError(f"Unsupported model {mode}") # relative pos embedding self.rel_pos_type = rel_pos_type if self.rel_pos_type == 'spatial': assert feat_size[0] == feat_size[1] size = feat_size[0] q_size = size // stride_q[1] if len(stride_q) > 0 else size kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size rel_sp_dim = 2 * max(q_size, kv_size) - 1 self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) trunc_normal_tf_(self.rel_pos_h, std=0.02) trunc_normal_tf_(self.rel_pos_w, std=0.02) self.residual_pooling = residual_pooling def forward(self, x, feat_size: List[int]): B, N, _ = x.shape fold_dim = 1 if self.unshared else self.num_heads x = x.reshape(B, N, fold_dim, -1).permute(0, 2, 1, 3) q = k = v = x if self.pool_q is not None: q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token) q = self.pool_q(q) q, q_size = reshape_post_pool(q, self.num_heads, q_tok) else: q_size = feat_size if self.norm_q is not None: q = self.norm_q(q) if self.pool_k is not None: k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token) k = self.pool_k(k) k, k_size = reshape_post_pool(k, self.num_heads, k_tok) else: k_size = feat_size if self.norm_k is not None: k = self.norm_k(k) if self.pool_v is not None: v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token) v = self.pool_v(v) v, v_size = reshape_post_pool(v, self.num_heads, v_tok) else: v_size = feat_size if self.norm_v is not None: v = self.norm_v(v) q_N = q_size[0] * q_size[1] + int(self.has_cls_token) q = q.transpose(1, 2).reshape(B, q_N, -1) q = self.q(q).reshape(B, q_N, self.num_heads, -1).transpose(1, 2) k_N = k_size[0] * k_size[1] + int(self.has_cls_token) k = k.transpose(1, 2).reshape(B, k_N, -1) k = self.k(k).reshape(B, k_N, self.num_heads, -1) v_N = v_size[0] * v_size[1] + int(self.has_cls_token) v = v.transpose(1, 2).reshape(B, v_N, -1) v = self.v(v).reshape(B, v_N, self.num_heads, -1).transpose(1, 2) attn = (q * self.scale) @ k if self.rel_pos_type == 'spatial': attn = cal_rel_pos_type( attn, q, self.has_cls_token, q_size, k_size, self.rel_pos_h, self.rel_pos_w, ) attn = attn.softmax(dim=-1) x = attn @ v if self.residual_pooling: x = x + q x = x.transpose(1, 2).reshape(B, -1, self.dim_out) x = self.proj(x) return x, q_size class 
MultiScaleAttention(nn.Module): def __init__( self, dim, dim_out, feat_size, num_heads=8, qkv_bias=True, mode="conv", kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), has_cls_token=True, rel_pos_type='spatial', residual_pooling=True, norm_layer=nn.LayerNorm, ): super().__init__() self.num_heads = num_heads self.dim_out = dim_out self.head_dim = dim_out // num_heads self.scale = self.head_dim ** -0.5 self.has_cls_token = has_cls_token padding_q = tuple([int(q // 2) for q in kernel_q]) padding_kv = tuple([int(kv // 2) for kv in kernel_kv]) self.qkv = nn.Linear(dim, dim_out * 3, bias=qkv_bias) self.proj = nn.Linear(dim_out, dim_out) # Skip pooling with kernel and stride size of (1, 1, 1). if prod(kernel_q) == 1 and prod(stride_q) == 1: kernel_q = None if prod(kernel_kv) == 1 and prod(stride_kv) == 1: kernel_kv = None self.mode = mode self.unshared = mode == 'conv_unshared' self.norm_q, self.norm_k, self.norm_v = None, None, None self.pool_q, self.pool_k, self.pool_v = None, None, None if mode in ("avg", "max"): pool_op = nn.MaxPool2d if mode == "max" else nn.AvgPool2d if kernel_q: self.pool_q = pool_op(kernel_q, stride_q, padding_q) if kernel_kv: self.pool_k = pool_op(kernel_kv, stride_kv, padding_kv) self.pool_v = pool_op(kernel_kv, stride_kv, padding_kv) elif mode == "conv" or mode == "conv_unshared": dim_conv = dim_out // num_heads if mode == "conv" else dim_out if kernel_q: self.pool_q = nn.Conv2d( dim_conv, dim_conv, kernel_q, stride=stride_q, padding=padding_q, groups=dim_conv, bias=False, ) self.norm_q = norm_layer(dim_conv) if kernel_kv: self.pool_k = nn.Conv2d( dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False, ) self.norm_k = norm_layer(dim_conv) self.pool_v = nn.Conv2d( dim_conv, dim_conv, kernel_kv, stride=stride_kv, padding=padding_kv, groups=dim_conv, bias=False, ) self.norm_v = norm_layer(dim_conv) else: raise NotImplementedError(f"Unsupported model {mode}") # relative pos embedding self.rel_pos_type = rel_pos_type if self.rel_pos_type == 'spatial': assert feat_size[0] == feat_size[1] size = feat_size[0] q_size = size // stride_q[1] if len(stride_q) > 0 else size kv_size = size // stride_kv[1] if len(stride_kv) > 0 else size rel_sp_dim = 2 * max(q_size, kv_size) - 1 self.rel_pos_h = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros(rel_sp_dim, self.head_dim)) trunc_normal_tf_(self.rel_pos_h, std=0.02) trunc_normal_tf_(self.rel_pos_w, std=0.02) self.residual_pooling = residual_pooling def forward(self, x, feat_size: List[int]): B, N, _ = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(dim=0) if self.pool_q is not None: q, q_tok = reshape_pre_pool(q, feat_size, self.has_cls_token) q = self.pool_q(q) q, q_size = reshape_post_pool(q, self.num_heads, q_tok) else: q_size = feat_size if self.norm_q is not None: q = self.norm_q(q) if self.pool_k is not None: k, k_tok = reshape_pre_pool(k, feat_size, self.has_cls_token) k = self.pool_k(k) k, k_size = reshape_post_pool(k, self.num_heads, k_tok) else: k_size = feat_size if self.norm_k is not None: k = self.norm_k(k) if self.pool_v is not None: v, v_tok = reshape_pre_pool(v, feat_size, self.has_cls_token) v = self.pool_v(v) v, _ = reshape_post_pool(v, self.num_heads, v_tok) if self.norm_v is not None: v = self.norm_v(v) attn = (q * self.scale) @ k.transpose(-2, -1) if self.rel_pos_type == 'spatial': attn = cal_rel_pos_type( attn, q, self.has_cls_token, q_size, k_size, 
self.rel_pos_h, self.rel_pos_w, ) attn = attn.softmax(dim=-1) x = attn @ v if self.residual_pooling: x = x + q x = x.transpose(1, 2).reshape(B, -1, self.dim_out) x = self.proj(x) return x, q_size class MultiScaleBlock(nn.Module): def __init__( self, dim, dim_out, num_heads, feat_size, mlp_ratio=4.0, qkv_bias=True, drop_path=0.0, norm_layer=nn.LayerNorm, kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), mode="conv", has_cls_token=True, expand_attn=False, pool_first=False, rel_pos_type='spatial', residual_pooling=True, ): super().__init__() proj_needed = dim != dim_out self.dim = dim self.dim_out = dim_out self.has_cls_token = has_cls_token self.norm1 = norm_layer(dim) self.shortcut_proj_attn = nn.Linear(dim, dim_out) if proj_needed and expand_attn else None if stride_q and prod(stride_q) > 1: kernel_skip = [s + 1 if s > 1 else s for s in stride_q] stride_skip = stride_q padding_skip = [int(skip // 2) for skip in kernel_skip] self.shortcut_pool_attn = nn.MaxPool2d(kernel_skip, stride_skip, padding_skip) else: self.shortcut_pool_attn = None att_dim = dim_out if expand_attn else dim attn_layer = MultiScaleAttentionPoolFirst if pool_first else MultiScaleAttention self.attn = attn_layer( dim, att_dim, num_heads=num_heads, feat_size=feat_size, qkv_bias=qkv_bias, kernel_q=kernel_q, kernel_kv=kernel_kv, stride_q=stride_q, stride_kv=stride_kv, norm_layer=norm_layer, has_cls_token=has_cls_token, mode=mode, rel_pos_type=rel_pos_type, residual_pooling=residual_pooling, ) self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() self.norm2 = norm_layer(att_dim) mlp_dim_out = dim_out self.shortcut_proj_mlp = nn.Linear(dim, dim_out) if proj_needed and not expand_attn else None self.mlp = Mlp( in_features=att_dim, hidden_features=int(att_dim * mlp_ratio), out_features=mlp_dim_out, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() def _shortcut_pool(self, x, feat_size: List[int]): if self.shortcut_pool_attn is None: return x if self.has_cls_token: cls_tok, x = x[:, :1, :], x[:, 1:, :] else: cls_tok = None B, L, C = x.shape H, W = feat_size x = x.reshape(B, H, W, C).permute(0, 3, 1, 2).contiguous() x = self.shortcut_pool_attn(x) x = x.reshape(B, C, -1).transpose(1, 2) if cls_tok is not None: x = torch.cat((cls_tok, x), dim=1) return x def forward(self, x, feat_size: List[int]): x_norm = self.norm1(x) # NOTE as per the original impl, this seems odd, but shortcut uses un-normalized input if no proj x_shortcut = x if self.shortcut_proj_attn is None else self.shortcut_proj_attn(x_norm) x_shortcut = self._shortcut_pool(x_shortcut, feat_size) x, feat_size_new = self.attn(x_norm, feat_size) x = x_shortcut + self.drop_path1(x) x_norm = self.norm2(x) x_shortcut = x if self.shortcut_proj_mlp is None else self.shortcut_proj_mlp(x_norm) x = x_shortcut + self.drop_path2(self.mlp(x_norm)) return x, feat_size_new class MultiScaleVitStage(nn.Module): def __init__( self, dim, dim_out, depth, num_heads, feat_size, mlp_ratio=4.0, qkv_bias=True, mode="conv", kernel_q=(1, 1), kernel_kv=(1, 1), stride_q=(1, 1), stride_kv=(1, 1), has_cls_token=True, expand_attn=False, pool_first=False, rel_pos_type='spatial', residual_pooling=True, norm_layer=nn.LayerNorm, drop_path=0.0, ): super().__init__() self.grad_checkpointing = False self.blocks = nn.ModuleList() if expand_attn: out_dims = (dim_out,) * depth else: out_dims = (dim,) * (depth - 1) + (dim_out,) for i in range(depth): attention_block = MultiScaleBlock( dim=dim, dim_out=out_dims[i], num_heads=num_heads, 
feat_size=feat_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, kernel_q=kernel_q, kernel_kv=kernel_kv, stride_q=stride_q if i == 0 else (1, 1), stride_kv=stride_kv, mode=mode, has_cls_token=has_cls_token, pool_first=pool_first, rel_pos_type=rel_pos_type, residual_pooling=residual_pooling, expand_attn=expand_attn, norm_layer=norm_layer, drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path, ) dim = out_dims[i] self.blocks.append(attention_block) if i == 0: feat_size = tuple([size // stride for size, stride in zip(feat_size, stride_q)]) self.feat_size = feat_size def forward(self, x, feat_size: List[int]): for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x, feat_size = checkpoint.checkpoint(blk, x, feat_size) else: x, feat_size = blk(x, feat_size) return x, feat_size class MultiScaleVit(nn.Module): """ Improved Multiscale Vision Transformers for Classification and Detection Yanghao Li*, Chao-Yuan Wu*, Haoqi Fan, Karttikeya Mangalam, Bo Xiong, Jitendra Malik, Christoph Feichtenhofer* https://arxiv.org/abs/2112.01526 Multiscale Vision Transformers Haoqi Fan*, Bo Xiong*, Karttikeya Mangalam*, Yanghao Li*, Zhicheng Yan, Jitendra Malik, Christoph Feichtenhofer* https://arxiv.org/abs/2104.11227 """ def __init__( self, cfg: MultiScaleVitCfg, img_size: Tuple[int, int] = (224, 224), in_chans: int = 3, global_pool: Optional[str] = None, num_classes: int = 1000, drop_path_rate: float = 0., drop_rate: float = 0., ): super().__init__() img_size = to_2tuple(img_size) norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps) self.num_classes = num_classes self.drop_rate = drop_rate if global_pool is None: global_pool = 'token' if cfg.use_cls_token else 'avg' self.global_pool = global_pool self.depths = tuple(cfg.depths) self.expand_attn = cfg.expand_attn embed_dim = cfg.embed_dim[0] self.patch_embed = PatchEmbed( dim_in=in_chans, dim_out=embed_dim, kernel=cfg.patch_kernel, stride=cfg.patch_stride, padding=cfg.patch_padding, ) patch_dims = (img_size[0] // cfg.patch_stride[0], img_size[1] // cfg.patch_stride[1]) num_patches = prod(patch_dims) if cfg.use_cls_token: self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.num_prefix_tokens = 1 pos_embed_dim = num_patches + 1 else: self.num_prefix_tokens = 0 self.cls_token = None pos_embed_dim = num_patches if cfg.use_abs_pos: self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_dim, embed_dim)) else: self.pos_embed = None num_stages = len(cfg.embed_dim) feat_size = patch_dims dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] self.stages = nn.ModuleList() for i in range(num_stages): if cfg.expand_attn: dim_out = cfg.embed_dim[i] else: dim_out = cfg.embed_dim[min(i + 1, num_stages - 1)] stage = MultiScaleVitStage( dim=embed_dim, dim_out=dim_out, depth=cfg.depths[i], num_heads=cfg.num_heads[i], feat_size=feat_size, mlp_ratio=cfg.mlp_ratio, qkv_bias=cfg.qkv_bias, mode=cfg.mode, pool_first=cfg.pool_first, expand_attn=cfg.expand_attn, kernel_q=cfg.kernel_qkv, kernel_kv=cfg.kernel_qkv, stride_q=cfg.stride_q[i], stride_kv=cfg.stride_kv[i], has_cls_token=cfg.use_cls_token, rel_pos_type=cfg.rel_pos_type, residual_pooling=cfg.residual_pooling, norm_layer=norm_layer, drop_path=dpr[i], ) embed_dim = dim_out feat_size = stage.feat_size self.stages.append(stage) self.num_features = embed_dim self.norm = norm_layer(embed_dim) self.head = nn.Sequential(OrderedDict([ ('drop', nn.Dropout(self.drop_rate)), ('fc', nn.Linear(self.num_features, num_classes) 
if num_classes > 0 else nn.Identity()) ])) if self.pos_embed is not None: trunc_normal_tf_(self.pos_embed, std=0.02) if self.cls_token is not None: trunc_normal_tf_(self.cls_token, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_tf_(m.weight, std=0.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0.0) @torch.jit.ignore def no_weight_decay(self): return {k for k, _ in self.named_parameters() if any(n in k for n in ["pos_embed", "rel_pos_h", "rel_pos_w", "cls_token"])} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embed', # stem and embed blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Sequential(OrderedDict([ ('drop', nn.Dropout(self.drop_rate)), ('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()) ])) def forward_features(self, x): x, feat_size = self.patch_embed(x) B, N, C = x.shape if self.cls_token is not None: cls_tokens = self.cls_token.expand(B, -1, -1) x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed for stage in self.stages: x, feat_size = stage(x, feat_size) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: if self.global_pool == 'avg': x = x[:, self.num_prefix_tokens:].mean(1) else: x = x[:, 0] return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'stages.0.blocks.0.norm1.weight' in state_dict: return state_dict import re if 'model_state' in state_dict: state_dict = state_dict['model_state'] depths = getattr(model, 'depths', None) expand_attn = getattr(model, 'expand_attn', True) assert depths is not None, 'model requires depth attribute to remap checkpoints' depth_map = {} block_idx = 0 for stage_idx, d in enumerate(depths): depth_map.update({i: (stage_idx, i - block_idx) for i in range(block_idx, block_idx + d)}) block_idx += d out_dict = {} for k, v in state_dict.items(): k = re.sub( r'blocks\.(\d+)', lambda x: f'stages.{depth_map[int(x.group(1))][0]}.blocks.{depth_map[int(x.group(1))][1]}', k) if expand_attn: k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_attn', k) else: k = re.sub(r'stages\.(\d+).blocks\.(\d+).proj', f'stages.\\1.blocks.\\2.shortcut_proj_mlp', k) if 'head' in k: k = k.replace('head.projection', 'head.fc') out_dict[k] = v # for k, v in state_dict.items(): # if model.pos_embed is not None and k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: # # To resize pos embedding when using model at different size from pretrained weights # v = resize_pos_embed( # v, # model.pos_embed, # 0 if getattr(model, 'no_embed_class') else getattr(model, 'num_prefix_tokens', 1), # model.patch_embed.grid_size # ) return out_dict model_cfgs = dict( mvitv2_tiny=MultiScaleVitCfg( depths=(1, 2, 5, 2), ), mvitv2_small=MultiScaleVitCfg( depths=(1, 2, 11, 2), ), mvitv2_base=MultiScaleVitCfg( depths=(2, 3, 16, 3), ), mvitv2_large=MultiScaleVitCfg( depths=(2, 6, 36, 4), embed_dim=144, num_heads=2, 
expand_attn=False, ), mvitv2_small_cls=MultiScaleVitCfg( depths=(1, 2, 11, 2), use_cls_token=True, ), mvitv2_base_cls=MultiScaleVitCfg( depths=(2, 3, 16, 3), use_cls_token=True, ), mvitv2_large_cls=MultiScaleVitCfg( depths=(2, 6, 36, 4), embed_dim=144, num_heads=2, use_cls_token=True, expand_attn=True, ), mvitv2_huge_cls=MultiScaleVitCfg( depths=(4, 8, 60, 8), embed_dim=192, num_heads=3, use_cls_token=True, expand_attn=True, ), ) def _create_mvitv2(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg( MultiScaleVit, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', 'fixed_input_size': True, **kwargs } default_cfgs = generate_default_cfgs({ 'mvitv2_tiny.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth'), 'mvitv2_small.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth'), 'mvitv2_base.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth'), 'mvitv2_large.fb_in1k': _cfg(url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth'), 'mvitv2_small_cls': _cfg(url=''), 'mvitv2_base_cls.fb_inw21k': _cfg( url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in21k.pyth', num_classes=19168), 'mvitv2_large_cls.fb_inw21k': _cfg( url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in21k.pyth', num_classes=19168), 'mvitv2_huge_cls.fb_inw21k': _cfg( url='https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_H_in21k.pyth', num_classes=19168), }) @register_model def mvitv2_tiny(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_tiny', pretrained=pretrained, **kwargs) @register_model def mvitv2_small(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_small', pretrained=pretrained, **kwargs) @register_model def mvitv2_base(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_base', pretrained=pretrained, **kwargs) @register_model def mvitv2_large(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_large', pretrained=pretrained, **kwargs) @register_model def mvitv2_small_cls(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_small_cls', pretrained=pretrained, **kwargs) @register_model def mvitv2_base_cls(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_base_cls', pretrained=pretrained, **kwargs) @register_model def mvitv2_large_cls(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_large_cls', pretrained=pretrained, **kwargs) @register_model def mvitv2_huge_cls(pretrained=False, **kwargs) -> MultiScaleVit: return _create_mvitv2('mvitv2_huge_cls', pretrained=pretrained, **kwargs)
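# Illustrative usage sketch (an added example, assuming the `timm` package is installed,
# not canonical usage from this file): instantiate one of the MViTv2 variants registered
# above and run a dummy forward pass at the 224x224 input size given by the default_cfgs
# above (note fixed_input_size=True for these configs).
if __name__ == '__main__':
    import torch
    import timm

    _model = timm.create_model('mvitv2_tiny', pretrained=False)
    _model.eval()
    with torch.no_grad():
        _logits = _model(torch.randn(1, 3, 224, 224))
    print(_logits.shape)  # expected: torch.Size([1, 1000])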
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/nasnet.py
""" NasNet-A (Large) nasnetalarge implementation grabbed from Cadene's pretrained models https://github.com/Cadene/pretrained-models.pytorch """ from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['NASNetALarge'] class ActConvBn(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): super(ActConvBn, self).__init__() self.act = nn.ReLU() self.conv = create_conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) def forward(self, x): x = self.act(x) x = self.conv(x) x = self.bn(x) return x class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): super(SeparableConv2d, self).__init__() self.depthwise_conv2d = create_conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels) self.pointwise_conv2d = create_conv2d( in_channels, out_channels, kernel_size=1, padding=0) def forward(self, x): x = self.depthwise_conv2d(x) x = self.pointwise_conv2d(x) return x class BranchSeparables(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, pad_type='', stem_cell=False): super(BranchSeparables, self).__init__() middle_channels = out_channels if stem_cell else in_channels self.act_1 = nn.ReLU() self.separable_1 = SeparableConv2d( in_channels, middle_channels, kernel_size, stride=stride, padding=pad_type) self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001, momentum=0.1) self.act_2 = nn.ReLU(inplace=True) self.separable_2 = SeparableConv2d( middle_channels, out_channels, kernel_size, stride=1, padding=pad_type) self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1) def forward(self, x): x = self.act_1(x) x = self.separable_1(x) x = self.bn_sep_1(x) x = self.act_2(x) x = self.separable_2(x) x = self.bn_sep_2(x) return x class CellStem0(nn.Module): def __init__(self, stem_size, num_channels=42, pad_type=''): super(CellStem0, self).__init__() self.num_channels = num_channels self.stem_size = stem_size self.conv_1x1 = ActConvBn(self.stem_size, self.num_channels, 1, stride=1) self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(self.stem_size, self.num_channels, 7, 2, pad_type, stem_cell=True) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(self.stem_size, self.num_channels, 5, 2, pad_type, stem_cell=True) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x): x1 = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x1) x_comb_iter_0_right = self.comb_iter_0_right(x) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x1) x_comb_iter_1_right = 
self.comb_iter_1_right(x) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x1) x_comb_iter_2_right = self.comb_iter_2_right(x) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x1) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class CellStem1(nn.Module): def __init__(self, stem_size, num_channels, pad_type=''): super(CellStem1, self).__init__() self.num_channels = num_channels self.stem_size = stem_size self.conv_1x1 = ActConvBn(2 * self.num_channels, self.num_channels, 1, stride=1) self.act = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_1.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) self.path_2 = nn.Sequential() self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_2.add_module('conv', nn.Conv2d(self.stem_size, self.num_channels // 2, 1, stride=1, bias=False)) self.final_path_bn = nn.BatchNorm2d(self.num_channels, eps=0.001, momentum=0.1) self.comb_iter_0_left = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(self.num_channels, self.num_channels, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(self.num_channels, self.num_channels, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(self.num_channels, self.num_channels, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x_conv0, x_stem_0): x_left = self.conv_1x1(x_stem_0) x_relu = self.act(x_conv0) # path 1 x_path1 = self.path_1(x_relu) # path 2 x_path2 = self.path_2(x_relu) # final path x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_comb_iter_0_left = self.comb_iter_0_left(x_left) x_comb_iter_0_right = self.comb_iter_0_right(x_right) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_right) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_left) x_comb_iter_2_right = self.comb_iter_2_right(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_left) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class FirstCell(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(FirstCell, self).__init__() 
self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1) self.act = nn.ReLU() self.path_1 = nn.Sequential() self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_1.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) self.path_2 = nn.Sequential() self.path_2.add_module('pad', nn.ZeroPad2d((-1, 1, -1, 1))) self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)) self.path_2.add_module('conv', nn.Conv2d(in_chs_left, out_chs_left, 1, stride=1, bias=False)) self.final_path_bn = nn.BatchNorm2d(out_chs_left * 2, eps=0.001, momentum=0.1) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_1_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) def forward(self, x, x_prev): x_relu = self.act(x_prev) x_path1 = self.path_1(x_relu) x_path2 = self.path_2(x_relu) x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right = self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class NormalCell(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(NormalCell, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 1, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) self.comb_iter_1_left = BranchSeparables(out_chs_left, out_chs_left, 5, 1, pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_left, out_chs_left, 3, 1, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_left = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = 
self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_left) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_left x_comb_iter_3_left = self.comb_iter_3_left(x_left) x_comb_iter_3_right = self.comb_iter_3_right(x_left) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_right x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class ReductionCell0(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(ReductionCell0, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class ReductionCell1(nn.Module): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(ReductionCell1, self).__init__() self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, 1, stride=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, 1, stride=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_0_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_1_left = create_pool2d('max', 3, 2, padding=pad_type) self.comb_iter_1_right = BranchSeparables(out_chs_right, out_chs_right, 7, 2, pad_type) self.comb_iter_2_left = create_pool2d('avg', 3, 2, 
count_include_pad=False, padding=pad_type) self.comb_iter_2_right = BranchSeparables(out_chs_right, out_chs_right, 5, 2, pad_type) self.comb_iter_3_right = create_pool2d('avg', 3, 1, count_include_pad=False, padding=pad_type) self.comb_iter_4_left = BranchSeparables(out_chs_right, out_chs_right, 3, 1, pad_type) self.comb_iter_4_right = create_pool2d('max', 3, 2, padding=pad_type) def forward(self, x, x_prev): x_left = self.conv_prev_1x1(x_prev) x_right = self.conv_1x1(x) x_comb_iter_0_left = self.comb_iter_0_left(x_right) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_left) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = self.comb_iter_2_right(x_left) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0) x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1 x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0) x_comb_iter_4_right = self.comb_iter_4_right(x_right) x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class NASNetALarge(nn.Module): """NASNetALarge (6 @ 4032) """ def __init__( self, num_classes=1000, in_chans=3, stem_size=96, channel_multiplier=2, num_features=4032, output_stride=32, drop_rate=0., global_pool='avg', pad_type='same', ): super(NASNetALarge, self).__init__() self.num_classes = num_classes self.stem_size = stem_size self.num_features = num_features self.channel_multiplier = channel_multiplier assert output_stride == 32 channels = self.num_features // 24 # 24 is default value for the architecture self.conv0 = ConvNormAct( in_channels=in_chans, out_channels=self.stem_size, kernel_size=3, padding=0, stride=2, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) self.cell_stem_0 = CellStem0( self.stem_size, num_channels=channels // (channel_multiplier ** 2), pad_type=pad_type) self.cell_stem_1 = CellStem1( self.stem_size, num_channels=channels // channel_multiplier, pad_type=pad_type) self.cell_0 = FirstCell( in_chs_left=channels, out_chs_left=channels // 2, in_chs_right=2 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_1 = NormalCell( in_chs_left=2 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_2 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_3 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_4 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.cell_5 = NormalCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=6 * channels, out_chs_right=channels, pad_type=pad_type) self.reduction_cell_0 = ReductionCell0( in_chs_left=6 * channels, out_chs_left=2 * channels, in_chs_right=6 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_6 = FirstCell( in_chs_left=6 * channels, out_chs_left=channels, in_chs_right=8 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_7 = NormalCell( in_chs_left=8 * channels, out_chs_left=2 * channels, in_chs_right=12 * 
channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_8 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_9 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_10 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.cell_11 = NormalCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=12 * channels, out_chs_right=2 * channels, pad_type=pad_type) self.reduction_cell_1 = ReductionCell1( in_chs_left=12 * channels, out_chs_left=4 * channels, in_chs_right=12 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_12 = FirstCell( in_chs_left=12 * channels, out_chs_left=2 * channels, in_chs_right=16 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_13 = NormalCell( in_chs_left=16 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_14 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_15 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_16 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.cell_17 = NormalCell( in_chs_left=24 * channels, out_chs_left=4 * channels, in_chs_right=24 * channels, out_chs_right=4 * channels, pad_type=pad_type) self.act = nn.ReLU(inplace=True) self.feature_info = [ dict(num_chs=96, reduction=2, module='conv0'), dict(num_chs=168, reduction=4, module='cell_stem_1.conv_1x1.act'), dict(num_chs=1008, reduction=8, module='reduction_cell_0.conv_1x1.act'), dict(num_chs=2016, reduction=16, module='reduction_cell_1.conv_1x1.act'), dict(num_chs=4032, reduction=32, module='act'), ] self.global_pool, self.head_drop, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv0|cell_stem_[01]', blocks=[ (r'^cell_(\d+)', None), (r'^reduction_cell_0', (6,)), (r'^reduction_cell_1', (12,)), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.last_linear def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x_conv0 = self.conv0(x) x_stem_0 = self.cell_stem_0(x_conv0) x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0) x_cell_0 = self.cell_0(x_stem_1, x_stem_0) x_cell_1 = self.cell_1(x_cell_0, x_stem_1) x_cell_2 = self.cell_2(x_cell_1, x_cell_0) x_cell_3 = self.cell_3(x_cell_2, x_cell_1) x_cell_4 = self.cell_4(x_cell_3, x_cell_2) x_cell_5 = self.cell_5(x_cell_4, x_cell_3) x_reduction_cell_0 = self.reduction_cell_0(x_cell_5, x_cell_4) x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_4) x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0) x_cell_8 = self.cell_8(x_cell_7, x_cell_6) x_cell_9 = 
self.cell_9(x_cell_8, x_cell_7) x_cell_10 = self.cell_10(x_cell_9, x_cell_8) x_cell_11 = self.cell_11(x_cell_10, x_cell_9) x_reduction_cell_1 = self.reduction_cell_1(x_cell_11, x_cell_10) x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_10) x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1) x_cell_14 = self.cell_14(x_cell_13, x_cell_12) x_cell_15 = self.cell_15(x_cell_14, x_cell_13) x_cell_16 = self.cell_16(x_cell_15, x_cell_14) x_cell_17 = self.cell_17(x_cell_16, x_cell_15) x = self.act(x_cell_17) return x def forward_head(self, x): x = self.global_pool(x) x = self.head_drop(x) x = self.last_linear(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_nasnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( NASNetALarge, variant, pretrained, feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model **kwargs, ) default_cfgs = generate_default_cfgs({ 'nasnetalarge.tf_in1k': { 'hf_hub_id': 'timm/', 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nasnetalarge-dc4a7b8b.pth', 'input_size': (3, 331, 331), 'pool_size': (11, 11), 'crop_pct': 0.911, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'num_classes': 1000, 'first_conv': 'conv0.conv', 'classifier': 'last_linear', }, }) @register_model def nasnetalarge(pretrained=False, **kwargs) -> NASNetALarge: """NASNet-A large model architecture. """ model_kwargs = dict(pad_type='same', **kwargs) return _create_nasnet('nasnetalarge', pretrained, **model_kwargs)
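# Illustrative usage sketch (an added example, assuming the `timm` package is installed,
# not canonical usage from this file): build nasnetalarge as registered above and run a
# dummy forward pass at the 331x331 input size listed in its default cfg.
if __name__ == '__main__':
    import torch
    import timm

    _model = timm.create_model('nasnetalarge', pretrained=False)
    _model.eval()
    with torch.no_grad():
        _logits = _model(torch.randn(1, 3, 331, 331))
    print(_logits.shape)  # expected: torch.Size([1, 1000])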
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/nest.py
""" Nested Transformer (NesT) in PyTorch A PyTorch implement of Aggregating Nested Transformers as described in: 'Aggregating Nested Transformers' - https://arxiv.org/abs/2105.12723 The official Jax code is released and available at https://github.com/google-research/nested-transformer. The weights have been converted with convert/convert_nest_flax.py Acknowledgments: * The paper authors for sharing their research, code, and model weights * Ross Wightman's existing code off which I based this Copyright 2021 Alexander Soare """ import collections.abc import logging import math from functools import partial import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_, _assert from timm.layers import create_conv2d, create_pool2d, to_ntuple, use_fused_attn, LayerNorm from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq, named_apply from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['Nest'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class Attention(nn.Module): """ This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with an extra "image block" dim """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): """ x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) """ B, T, N, C = x.shape # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p) else: q = q * self.scale attn = q @ k.transpose(-2, -1) # (B, H, T, N, N) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v # (B, H, T, N, C'), permute -> (B, T, N, C', H) x = x.permute(0, 2, 3, 4, 1).reshape(B, T, N, C) x = self.proj(x) x = self.proj_drop(x) return x # (B, T, N, C) class TransformerLayer(nn.Module): """ This is much like `.vision_transformer.Block` but: - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") - Uses modified Attention layer that handles the "block" dimension """ def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def forward(self, x): y = self.norm1(x) x = x + self.drop_path(self.attn(y)) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConvPool(nn.Module): def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): super().__init__() self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) self.norm = norm_layer(out_channels) self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) def forward(self, x): """ x is expected to have shape (B, C, H, W) """ _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') x = self.conv(x) # Layer norm done over channel dim only x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) x = self.pool(x) return x # (B, C, H//2, W//2) def blockify(x, block_size: int): """image to blocks Args: x (Tensor): with shape (B, H, W, C) block_size (int): edge length of a single square block in units of H, W """ B, H, W, C = x.shape _assert(H % block_size == 0, '`block_size` must divide input height evenly') _assert(W % block_size == 0, '`block_size` must divide input width evenly') grid_height = H // block_size grid_width = W // block_size x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) return x # (B, T, N, C) @register_notrace_function # reason: int receives Proxy def deblockify(x, block_size: int): """blocks to image Args: x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block block_size (int): edge length of a single square block in units of desired H, W """ B, T, _, C = x.shape grid_size = int(math.sqrt(T)) height = width = grid_size * block_size x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) x = x.transpose(2, 3).reshape(B, height, width, C) return x # (B, H, W, C) class NestLevel(nn.Module): """ Single hierarchical level of a Nested Transformer """ def __init__( self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, mlp_ratio=4., qkv_bias=True, proj_drop=0., attn_drop=0., drop_path=[], norm_layer=None, act_layer=None, pad_type='', ): super().__init__() self.block_size = block_size self.grad_checkpointing = False self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) if prev_embed_dim is not None: self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) else: self.pool = nn.Identity() # Transformer encoder if len(drop_path): assert len(drop_path) == depth, 'Must provide as many drop path rates as there are transformer layers' self.transformer_encoder = nn.Sequential(*[ TransformerLayer( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer, act_layer=act_layer, ) for i in range(depth)]) def forward(self, x): """ expects x as (B, C, H, W) """ x = self.pool(x) x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer x = blockify(x, self.block_size) # (B, T, N, C') x = x + self.pos_embed if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.transformer_encoder, x) else: x = 
self.transformer_encoder(x)  # (B, T, N, C')
        x = deblockify(x, self.block_size)  # (B, H', W', C')
        # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage
        return x.permute(0, 3, 1, 2)  # (B, C, H', W')


class Nest(nn.Module):
    """ Nested Transformer (NesT)

    A PyTorch impl of: `Aggregating Nested Transformers`
        - https://arxiv.org/abs/2105.12723
    """

    def __init__(
            self,
            img_size=224,
            in_chans=3,
            patch_size=4,
            num_levels=3,
            embed_dims=(128, 256, 512),
            num_heads=(4, 8, 16),
            depths=(2, 2, 20),
            num_classes=1000,
            mlp_ratio=4.,
            qkv_bias=True,
            drop_rate=0.,
            proj_drop_rate=0.,
            attn_drop_rate=0.,
            drop_path_rate=0.5,
            norm_layer=None,
            act_layer=None,
            pad_type='',
            weight_init='',
            global_pool='avg',
    ):
        """
        Args:
            img_size (int, tuple): input image size
            in_chans (int): number of input channels
            patch_size (int): patch size
            num_levels (int): number of block hierarchies (T_d in the paper)
            embed_dims (int, tuple): embedding dimensions of each level
            num_heads (int, tuple): number of attention heads for each level
            depths (int, tuple): number of transformer layers for each level
            num_classes (int): number of classes for classification head
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers
            qkv_bias (bool): enable bias for qkv if True
            drop_rate (float): classifier head dropout rate
            proj_drop_rate (float): dropout rate for MLP of transformer layers and MSA final projection layer
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer (nn.Module): normalization layer for transformer layers
            act_layer (nn.Module): activation layer in MLP of transformer layers
            pad_type (str): type of padding to use, '' for PyTorch symmetric, 'same' for TF SAME
            weight_init (str): weight init scheme
            global_pool (str): type of pooling operation to apply to final feature map

        Notes:
            - Default values follow NesT-B from the original Jax code.
            - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`.
            - For those following the paper, Table A1 may have errors!
                - https://github.com/google-research/nested-transformer/issues/2
        """
        super().__init__()
        for param_name in ['embed_dims', 'num_heads', 'depths']:
            param_value = locals()[param_name]
            if isinstance(param_value, collections.abc.Sequence):
                assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`'

        embed_dims = to_ntuple(num_levels)(embed_dims)
        num_heads = to_ntuple(num_levels)(num_heads)
        depths = to_ntuple(num_levels)(depths)
        self.num_classes = num_classes
        self.num_features = embed_dims[-1]
        self.feature_info = []
        norm_layer = norm_layer or LayerNorm
        act_layer = act_layer or nn.GELU
        self.drop_rate = drop_rate
        self.num_levels = num_levels
        if isinstance(img_size, collections.abc.Sequence):
            assert img_size[0] == img_size[1], 'Model only handles square inputs'
            img_size = img_size[0]
        assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly'
        self.patch_size = patch_size

        # Number of blocks at each level
        self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist()
        assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \
            'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`'

        # Block edge size in units of patches
        # Hint: (img_size // patch_size) gives number of patches along edge of image.
sqrt(self.num_blocks[0]) is the
        # number of blocks along edge of image
        self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0]))

        # Patch embedding
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dims[0],
            flatten=False,
        )
        self.num_patches = self.patch_embed.num_patches
        self.seq_length = self.num_patches // self.num_blocks[0]

        # Build up each hierarchical level
        levels = []
        dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
        prev_dim = None
        curr_stride = 4
        for i in range(len(self.num_blocks)):
            dim = embed_dims[i]
            levels.append(NestLevel(
                self.num_blocks[i],
                self.block_size,
                self.seq_length,
                num_heads[i],
                depths[i],
                dim,
                prev_dim,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                proj_drop=proj_drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dp_rates[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                pad_type=pad_type,
            ))
            self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')]
            prev_dim = dim
            curr_stride *= 2
        self.levels = nn.Sequential(*levels)

        # Final normalization layer
        self.norm = norm_layer(embed_dims[-1])

        # Classifier
        global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
        self.global_pool = global_pool
        self.head_drop = nn.Dropout(drop_rate)
        self.head = head

        self.init_weights(weight_init)

    @torch.jit.ignore
    def init_weights(self, mode=''):
        assert mode in ('nlhb', '')
        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
        for level in self.levels:
            trunc_normal_(level.pos_embed, std=.02, a=-2, b=2)
        named_apply(partial(_init_nest_weights, head_bias=head_bias), self)

    @torch.jit.ignore
    def no_weight_decay(self):
        # NOTE: names must match named_parameters() keys, i.e. 'levels.{i}.pos_embed'
        return {f'levels.{i}.pos_embed' for i in range(len(self.levels))}

    @torch.jit.ignore
    def group_matcher(self, coarse=False):
        matcher = dict(
            stem=r'^patch_embed',  # stem and embed
            blocks=[
                (r'^levels\.(\d+)' if coarse else r'^levels\.(\d+)\.transformer_encoder\.(\d+)', None),
                (r'^levels\.(\d+)\.(?:pool|pos_embed)', (0,)),
                (r'^norm', (99999,))
            ]
        )
        return matcher

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        for level in self.levels:
            level.grad_checkpointing = enable

    @torch.jit.ignore
    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool='avg'):
        self.num_classes = num_classes
        self.global_pool, self.head = create_classifier(
            self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = self.levels(x)
        # Layer norm done over channel dim only (to NHWC and back)
        x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        x = self.global_pool(x)
        x = self.head_drop(x)
        return x if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.):
    """ NesT weight initialization
    Can replicate Jax implementation.
Otherwise follows vision_transformer.py """ if isinstance(module, nn.Linear): if name.startswith('head'): trunc_normal_(module.weight, std=.02, a=-2, b=2) nn.init.constant_(module.bias, head_bias) else: trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) def resize_pos_embed(posemb, posemb_new): """ Rescale the grid of position embeddings when loading from state_dict Expected shape of position embeddings is (1, T, N, C), and considers only square images """ _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) seq_length_old = posemb.shape[2] num_blocks_new, seq_length_new = posemb_new.shape[1:3] size_new = int(math.sqrt(num_blocks_new*seq_length_new)) # First change to (1, C, H, W) posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) # Now change to new (1, T, N, C) posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) return posemb def checkpoint_filter_fn(state_dict, model): """ resize positional embeddings of pretrained weights """ pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] for k in pos_embed_keys: if state_dict[k].shape != getattr(model, k).shape: state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) return state_dict def _create_nest(variant, pretrained=False, **kwargs): model = build_model_with_cfg( Nest, variant, pretrained, feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'nest_base.untrained': _cfg(), 'nest_small.untrained': _cfg(), 'nest_tiny.untrained': _cfg(), # (weights from official Google JAX impl, require 'SAME' padding) 'nest_base_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_small_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_tiny_jx.goog_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def nest_base(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) return model @register_model def nest_base_jx(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = 
_create_nest('nest_base_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small_jx(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny_jx(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny_jx', pretrained=pretrained, **model_kwargs) return model register_model_deprecations(__name__, { 'jx_nest_base': 'nest_base_jx', 'jx_nest_small': 'nest_small_jx', 'jx_nest_tiny': 'nest_tiny_jx', })
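# Usage sketch: a quick shape check of the blockify/deblockify helpers above,
# runnable (assuming timm is installed) via `python -m timm.models.nest`.
if __name__ == '__main__':
    x = torch.randn(2, 56, 56, 128)      # (B, H, W, C), channels-last
    blocks = blockify(x, block_size=14)  # (2, 16, 196, 128): a 4x4 grid of 14x14 blocks
    assert torch.equal(deblockify(blocks, block_size=14), x)  # exact round-trip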
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/nfnet.py
""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 Paper: `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets Status: * These models are a work in progress, experiments ongoing. * Pretrained weights for two models so far, more to come. * Model details updated to closer match official JAX code now that it's released * NF-ResNet, NF-RegNet-B, and NFNet-F models supported Hacked together by / copyright Ross Wightman, 2021. """ from collections import OrderedDict from dataclasses import dataclass, replace from functools import partial from typing import Callable, Tuple, Optional import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame, \ get_act_layer, get_act_fn, get_attn, make_divisible from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['NormFreeNet', 'NfCfg'] # model_registry will add each entrypoint fn to this @dataclass class NfCfg: depths: Tuple[int, int, int, int] channels: Tuple[int, int, int, int] alpha: float = 0.2 stem_type: str = '3x3' stem_chs: Optional[int] = None group_size: Optional[int] = None attn_layer: Optional[str] = None attn_kwargs: dict = None attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used width_factor: float = 1.0 bottle_ratio: float = 0.5 num_features: int = 0 # num out_channels for final conv, no final_conv if 0 ch_div: int = 8 # round channels % 8 == 0 to keep tensor-core use optimal reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models gamma_in_act: bool = False same_padding: bool = False std_conv_eps: float = 1e-5 skipinit: bool = False # disabled by default, non-trivial performance impact zero_init_fc: bool = False act_layer: str = 'silu' class GammaAct(nn.Module): def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False): super().__init__() self.act_fn = get_act_fn(act_type) self.gamma = gamma self.inplace = inplace def forward(self, x): return self.act_fn(x, inplace=self.inplace).mul_(self.gamma) def act_with_gamma(act_type, gamma: float = 1.): def _create(inplace=False): return GammaAct(act_type, gamma=gamma, inplace=inplace) return _create class DownsampleAvg(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 1, dilation: int = 1, first_dilation: Optional[int] = None, conv_layer: Callable = ScaledStdConv2d, ): """ AvgPool Downsampling as in 'D' ResNet variants. Support for dilation.""" super(DownsampleAvg, self).__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = conv_layer(in_chs, out_chs, 1, stride=1) def forward(self, x): return self.conv(self.pool(x)) @register_notrace_module # reason: mul_ causes FX to drop a relevant node. 
https://github.com/pytorch/pytorch/issues/68301
class NormFreeBlock(nn.Module):
    """Normalization-Free pre-activation block.
    """

    def __init__(
            self,
            in_chs: int,
            out_chs: Optional[int] = None,
            stride: int = 1,
            dilation: int = 1,
            first_dilation: Optional[int] = None,
            alpha: float = 1.0,
            beta: float = 1.0,
            bottle_ratio: float = 0.25,
            group_size: Optional[int] = None,
            ch_div: int = 1,
            reg: bool = True,
            extra_conv: bool = False,
            skipinit: bool = False,
            attn_layer: Optional[Callable] = None,
            attn_gain: float = 2.0,
            act_layer: Optional[Callable] = None,
            conv_layer: Callable = ScaledStdConv2d,
            drop_path_rate: float = 0.,
    ):
        super().__init__()
        first_dilation = first_dilation or dilation
        out_chs = out_chs or in_chs
        # RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet
        mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div)
        groups = 1 if not group_size else mid_chs // group_size
        if group_size and group_size % ch_div == 0:
            mid_chs = group_size * groups  # correct mid_chs if group_size divisible by ch_div, otherwise error
        self.alpha = alpha
        self.beta = beta
        self.attn_gain = attn_gain

        if in_chs != out_chs or stride != 1 or dilation != first_dilation:
            self.downsample = DownsampleAvg(
                in_chs,
                out_chs,
                stride=stride,
                dilation=dilation,
                first_dilation=first_dilation,
                conv_layer=conv_layer,
            )
        else:
            self.downsample = None

        self.act1 = act_layer()
        self.conv1 = conv_layer(in_chs, mid_chs, 1)
        self.act2 = act_layer(inplace=True)
        self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
        if extra_conv:
            self.act2b = act_layer(inplace=True)
            self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups)
        else:
            self.act2b = None
            self.conv2b = None
        if reg and attn_layer is not None:
            self.attn = attn_layer(mid_chs)  # RegNet blocks apply attn btw conv2 & 3
        else:
            self.attn = None
        self.act3 = act_layer()
        self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.)
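        # NOTE: zero gain_init leaves the residual branch initially outputting zero, so each block
        # starts as an identity mapping; with skipinit, the skipinit_gain parameter (init 0) plays
        # that role instead and conv3 keeps a gain of 1.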
if not reg and attn_layer is not None: self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3 else: self.attn_last = None self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None def forward(self, x): out = self.act1(x) * self.beta # shortcut branch shortcut = x if self.downsample is not None: shortcut = self.downsample(out) # residual branch out = self.conv1(out) out = self.conv2(self.act2(out)) if self.conv2b is not None: out = self.conv2b(self.act2b(out)) if self.attn is not None: out = self.attn_gain * self.attn(out) out = self.conv3(self.act3(out)) if self.attn_last is not None: out = self.attn_gain * self.attn_last(out) out = self.drop_path(out) if self.skipinit_gain is not None: out.mul_(self.skipinit_gain) out = out * self.alpha + shortcut return out def create_stem( in_chs: int, out_chs: int, stem_type: str = '', conv_layer: Optional[Callable] = None, act_layer: Optional[Callable] = None, preact_feature: bool = True, ): stem_stride = 2 stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv') stem = OrderedDict() assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool') if 'deep' in stem_type: if 'quad' in stem_type: # 4 deep conv stack as in NFNet-F models assert not 'pool' in stem_type stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs) strides = (2, 1, 1, 2) stem_stride = 4 stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3') else: if 'tiered' in stem_type: stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py else: stem_chs = (out_chs // 2, out_chs // 2, out_chs) # 'D' ResNets strides = (2, 1, 1) stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2') last_idx = len(stem_chs) - 1 for i, (c, s) in enumerate(zip(stem_chs, strides)): stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s) if i != last_idx: stem[f'act{i + 2}'] = act_layer(inplace=True) in_chs = c elif '3x3' in stem_type: # 3x3 stem conv as in RegNet stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2) else: # 7x7 stem conv as in ResNet stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) if 'pool' in stem_type: stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1) stem_stride = 4 return nn.Sequential(stem), stem_stride, stem_feature # from https://github.com/deepmind/deepmind-research/tree/master/nfnets _nonlin_gamma = dict( identity=1.0, celu=1.270926833152771, elu=1.2716004848480225, gelu=1.7015043497085571, leaky_relu=1.70590341091156, log_sigmoid=1.9193484783172607, log_softmax=1.0002083778381348, relu=1.7139588594436646, relu6=1.7131484746932983, selu=1.0008515119552612, sigmoid=4.803835391998291, silu=1.7881293296813965, softsign=2.338853120803833, softplus=1.9203323125839233, tanh=1.5939117670059204, ) class NormFreeNet(nn.Module): """ Normalization-Free Network As described in : `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 and `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and the (preact) ResNet models described earlier in the paper. 
There are a few differences: * channels are rounded to be divisible by 8 by default (keep tensor core kernels happy), this changes channel dim and param counts slightly from the paper models * activation correcting gamma constants are moved into the ScaledStdConv as it has less performance impact in PyTorch when done with the weight scaling there. This likely wasn't a concern in the JAX impl. * a config option `gamma_in_act` can be enabled to not apply gamma in StdConv as described above, but apply it in each activation. This is slightly slower, numerically different, but matches official impl. * skipinit is disabled by default, it seems to have a rather drastic impact on GPU memory use and throughput for what it is/does. Approx 8-10% throughput loss. """ def __init__( self, cfg: NfCfg, num_classes: int = 1000, in_chans: int = 3, global_pool: str = 'avg', output_stride: int = 32, drop_rate: float = 0., drop_path_rate: float = 0., **kwargs, ): """ Args: cfg: Model architecture configuration. num_classes: Number of classifier classes. in_chans: Number of input channels. global_pool: Global pooling type. output_stride: Output stride of network, one of (8, 16, 32). drop_rate: Dropout rate. drop_path_rate: Stochastic depth drop-path rate. **kwargs: Extra kwargs overlayed onto cfg. """ super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False cfg = replace(cfg, **kwargs) assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})." conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d if cfg.gamma_in_act: act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer]) conv_layer = partial(conv_layer, eps=cfg.std_conv_eps) else: act_layer = get_act_layer(cfg.act_layer) conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps) attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div) self.stem, stem_stride, stem_feat = create_stem( in_chans, stem_chs, cfg.stem_type, conv_layer=conv_layer, act_layer=act_layer, ) self.feature_info = [stem_feat] drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)] prev_chs = stem_chs net_stride = stem_stride dilation = 1 expected_var = 1.0 stages = [] for stage_idx, stage_depth in enumerate(cfg.depths): stride = 1 if stage_idx == 0 and stem_stride > 2 else 2 if net_stride >= output_stride and stride > 1: dilation *= stride stride = 1 net_stride *= stride first_dilation = 1 if dilation in (1, 2) else 2 blocks = [] for block_idx in range(cfg.depths[stage_idx]): first_block = block_idx == 0 and stage_idx == 0 out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div) blocks += [NormFreeBlock( in_chs=prev_chs, out_chs=out_chs, alpha=cfg.alpha, beta=1. / expected_var ** 0.5, stride=stride if block_idx == 0 else 1, dilation=dilation, first_dilation=first_dilation, group_size=cfg.group_size, bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio, ch_div=cfg.ch_div, reg=cfg.reg, extra_conv=cfg.extra_conv, skipinit=cfg.skipinit, attn_layer=attn_layer, attn_gain=cfg.attn_gain, act_layer=act_layer, conv_layer=conv_layer, drop_path_rate=drop_path_rates[stage_idx][block_idx], )] if block_idx == 0: expected_var = 1. 
# expected var is reset after first block of each stage expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance first_dilation = dilation prev_chs = out_chs self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')] stages += [nn.Sequential(*blocks)] self.stages = nn.Sequential(*stages) if cfg.num_features: # The paper NFRegNet models have an EfficientNet-like final head convolution. self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div) self.final_conv = conv_layer(prev_chs, self.num_features, 1) self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module=f'final_conv') else: self.num_features = prev_chs self.final_conv = nn.Identity() self.final_act = act_layer(inplace=cfg.num_features > 0) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) for n, m in self.named_modules(): if 'fc' in n and isinstance(m, nn.Linear): if cfg.zero_init_fc: nn.init.zeros_(m.weight) else: nn.init.normal_(m.weight, 0., .01) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear') if m.bias is not None: nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=[ (r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None), (r'^final_conv', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) x = self.final_conv(x) x = self.final_act(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _nfres_cfg( depths, channels=(256, 512, 1024, 2048), group_size=None, act_layer='relu', attn_layer=None, attn_kwargs=None, ): attn_kwargs = attn_kwargs or {} cfg = NfCfg( depths=depths, channels=channels, stem_type='7x7_pool', stem_chs=64, bottle_ratio=0.25, group_size=group_size, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs, ) return cfg def _nfreg_cfg(depths, channels=(48, 104, 208, 440)): num_features = 1280 * channels[-1] // 440 attn_kwargs = dict(rd_ratio=0.5) cfg = NfCfg( depths=depths, channels=channels, stem_type='3x3', group_size=8, width_factor=0.75, bottle_ratio=2.25, num_features=num_features, reg=True, attn_layer='se', attn_kwargs=attn_kwargs, ) return cfg def _nfnet_cfg( depths, channels=(256, 512, 1536, 1536), group_size=128, bottle_ratio=0.5, feat_mult=2., act_layer='gelu', attn_layer='se', attn_kwargs=None, ): num_features = int(channels[-1] * feat_mult) attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5) cfg = NfCfg( depths=depths, channels=channels, stem_type='deep_quad', stem_chs=128, group_size=group_size, bottle_ratio=bottle_ratio, extra_conv=True, num_features=num_features, act_layer=act_layer, attn_layer=attn_layer, attn_kwargs=attn_kwargs, ) return cfg def _dm_nfnet_cfg( depths, channels=(256, 512, 1536, 1536), act_layer='gelu', skipinit=True, ): cfg = 
NfCfg(
        depths=depths,
        channels=channels,
        stem_type='deep_quad',
        stem_chs=128,
        group_size=128,
        bottle_ratio=0.5,
        extra_conv=True,
        gamma_in_act=True,
        same_padding=True,
        skipinit=skipinit,
        num_features=int(channels[-1] * 2.0),
        act_layer=act_layer,
        attn_layer='se',
        attn_kwargs=dict(rd_ratio=0.5),
    )
    return cfg


model_cfgs = dict(
    # NFNet-F models w/ GELU compatible with DeepMind weights
    dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)),
    dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)),
    dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)),
    dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)),
    dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)),
    dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)),
    dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)),

    # NFNet-F models w/ GELU
    nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)),
    nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)),
    nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)),
    nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)),
    nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)),
    nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)),
    nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)),
    nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)),

    # Experimental 'light' versions of NFNet-F that are a little leaner, w/ SiLU act
    nfnet_l0=_nfnet_cfg(
        depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,
        attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'),
    eca_nfnet_l0=_nfnet_cfg(
        depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,
        attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
    eca_nfnet_l1=_nfnet_cfg(
        depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25,
        attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
    eca_nfnet_l2=_nfnet_cfg(
        depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25,
        attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
    eca_nfnet_l3=_nfnet_cfg(
        depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25,
        attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),

    # EffNet influenced RegNet defs.
    # NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8.
nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)), nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)), nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)), nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)), nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)), nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)), # ResNet (preact, D style deep stem/avg down) defs nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)), nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)), nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)), nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)), nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()), nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()), nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()), ) def _create_normfreenet(variant, pretrained=False, **kwargs): model_cfg = model_cfgs[variant] feature_cfg = dict(flatten_sequential=True) return build_model_with_cfg( NormFreeNet, variant, pretrained, model_cfg=model_cfg, feature_cfg=feature_cfg, **kwargs, ) def _dcfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'dm_nfnet_f0.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9, crop_mode='squash'), 'dm_nfnet_f1.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91, crop_mode='squash'), 'dm_nfnet_f2.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92, crop_mode='squash'), 'dm_nfnet_f3.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94, crop_mode='squash'), 'dm_nfnet_f4.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951, crop_mode='squash'), 'dm_nfnet_f5.dm_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954, crop_mode='squash'), 'dm_nfnet_f6.dm_in1k': _dcfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956, crop_mode='squash'), 'nfnet_f0': _dcfg( url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)), 'nfnet_f1': _dcfg( url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)), 'nfnet_f2': _dcfg( url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)), 'nfnet_f3': _dcfg( url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)), 'nfnet_f4': _dcfg( url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)), 'nfnet_f5': _dcfg( url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)), 'nfnet_f6': _dcfg( url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)), 'nfnet_f7': _dcfg( url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)), 'nfnet_l0.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_nfnet_l0.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0), 'eca_nfnet_l1.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), test_crop_pct=1.0), 'eca_nfnet_l2.ra3_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), test_crop_pct=1.0), 'eca_nfnet_l3': _dcfg( url='', pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), test_crop_pct=1.0), 'nf_regnet_b0': _dcfg( url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'), 'nf_regnet_b1.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec 'nf_regnet_b2': _dcfg( url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'), 'nf_regnet_b3': _dcfg( url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'), 'nf_regnet_b4': _dcfg( url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'), 'nf_regnet_b5': _dcfg( url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'), 'nf_resnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_resnet50.ra2_in1k': _dcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'), 'nf_resnet101': 
_dcfg(url='', first_conv='stem.conv'), 'nf_seresnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet50': _dcfg(url='', first_conv='stem.conv'), 'nf_seresnet101': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet26': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet50': _dcfg(url='', first_conv='stem.conv'), 'nf_ecaresnet101': _dcfg(url='', first_conv='stem.conv'), }) @register_model def dm_nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F0 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F1 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F2 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F3 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F4 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F5 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs) @register_model def dm_nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F6 (DeepMind weight compatible) `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs) @register_model def nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F0 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs) @register_model def nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F1 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs) @register_model def nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F2 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs) @register_model def nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F3 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f3', 
pretrained=pretrained, **kwargs) @register_model def nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F4 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs) @register_model def nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F5 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs) @register_model def nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F6 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs) @register_model def nfnet_f7(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-F7 `High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171 """ return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs) @register_model def nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet: """ NFNet-L0b w/ SiLU My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio """ return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet: """ ECA-NFNet-L0 w/ SiLU My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l1(pretrained=False, **kwargs) -> NormFreeNet: """ ECA-NFNet-L1 w/ SiLU My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l2(pretrained=False, **kwargs) -> NormFreeNet: """ ECA-NFNet-L2 w/ SiLU My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs) @register_model def eca_nfnet_l3(pretrained=False, **kwargs) -> NormFreeNet: """ ECA-NFNet-L3 w/ SiLU My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn """ return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b0(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B0 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b1(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B1 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b2(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B2 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b3(pretrained=False, **kwargs) -> 
NormFreeNet: """ Normalization-Free RegNet-B3 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b4(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B4 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs) @register_model def nf_regnet_b5(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free RegNet-B5 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs) @register_model def nf_resnet26(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ResNet-26 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs) @register_model def nf_resnet50(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ResNet-50 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs) @register_model def nf_resnet101(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ResNet-101 `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 """ return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs) @register_model def nf_seresnet26(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free SE-ResNet26 """ return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs) @register_model def nf_seresnet50(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free SE-ResNet50 """ return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs) @register_model def nf_seresnet101(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free SE-ResNet101 """ return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet26(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ECA-ResNet26 """ return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet50(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ECA-ResNet50 """ return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs) @register_model def nf_ecaresnet101(pretrained=False, **kwargs) -> NormFreeNet: """ Normalization-Free ECA-ResNet101 """ return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/pit.py
""" Pooling-based Vision Transformer (PiT) in PyTorch A PyTorch implement of Pooling-based Vision Transformers as described in 'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302 This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below. Modifications for timm by / Copyright 2020 Ross Wightman """ # PiT # Copyright 2021-present NAVER Corp. # Apache License v2.0 import math import re from functools import partial from typing import Sequence, Tuple import torch from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import trunc_normal_, to_2tuple, LayerNorm from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .vision_transformer import Block __all__ = ['PoolingVisionTransformer'] # model_registry will add each entrypoint fn to this class SequentialTuple(nn.Sequential): """ This module exists to work around torchscript typing issues list -> list""" def __init__(self, *args): super(SequentialTuple, self).__init__(*args) def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: for module in self: x = module(x) return x class Transformer(nn.Module): def __init__( self, base_dim, depth, heads, mlp_ratio, pool=None, proj_drop=.0, attn_drop=.0, drop_path_prob=None, norm_layer=None, ): super(Transformer, self).__init__() embed_dim = base_dim * heads self.pool = pool self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() self.blocks = nn.Sequential(*[ Block( dim=embed_dim, num_heads=heads, mlp_ratio=mlp_ratio, qkv_bias=True, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path_prob[i], norm_layer=partial(nn.LayerNorm, eps=1e-6) ) for i in range(depth)]) def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]: x, cls_tokens = x token_length = cls_tokens.shape[1] if self.pool is not None: x, cls_tokens = self.pool(x, cls_tokens) B, C, H, W = x.shape x = x.flatten(2).transpose(1, 2) x = torch.cat((cls_tokens, x), dim=1) x = self.norm(x) x = self.blocks(x) cls_tokens = x[:, :token_length] x = x[:, token_length:] x = x.transpose(1, 2).reshape(B, C, H, W) return x, cls_tokens class Pooling(nn.Module): def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'): super(Pooling, self).__init__() self.conv = nn.Conv2d( in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride, padding_mode=padding_mode, groups=in_feature, ) self.fc = nn.Linear(in_feature, out_feature) def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]: x = self.conv(x) cls_token = self.fc(cls_token) return x, cls_token class ConvEmbedding(nn.Module): def __init__( self, in_channels, out_channels, img_size: int = 224, patch_size: int = 16, stride: int = 8, padding: int = 0, ): super(ConvEmbedding, self).__init__() padding = padding self.img_size = to_2tuple(img_size) self.patch_size = to_2tuple(patch_size) self.height = math.floor((self.img_size[0] + 2 * padding - self.patch_size[0]) / stride + 1) self.width = math.floor((self.img_size[1] + 2 * padding - self.patch_size[1]) / stride + 1) self.grid_size = (self.height, self.width) self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True) def forward(self, x): x = self.conv(x) return x class PoolingVisionTransformer(nn.Module): """ Pooling-based Vision Transformer A PyTorch implement of 'Rethinking 
Spatial Dimensions of Vision Transformers'
        - https://arxiv.org/abs/2103.16302
    """
    def __init__(
            self,
            img_size: int = 224,
            patch_size: int = 16,
            stride: int = 8,
            stem_type: str = 'overlap',
            base_dims: Sequence[int] = (48, 48, 48),
            depth: Sequence[int] = (2, 6, 4),
            heads: Sequence[int] = (2, 4, 8),
            mlp_ratio: float = 4,
            num_classes=1000,
            in_chans=3,
            global_pool='token',
            distilled=False,
            drop_rate=0.,
            pos_drop_rate=0.,
            proj_drop_rate=0.,
            attn_drop_rate=0.,
            drop_path_rate=0.,
    ):
        super(PoolingVisionTransformer, self).__init__()
        assert global_pool in ('token',)

        self.base_dims = base_dims
        self.heads = heads
        embed_dim = base_dims[0] * heads[0]
        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_tokens = 2 if distilled else 1
        self.feature_info = []

        self.patch_embed = ConvEmbedding(in_chans, embed_dim, img_size, patch_size, stride)
        self.pos_embed = nn.Parameter(torch.randn(1, embed_dim, self.patch_embed.height, self.patch_embed.width))
        self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, embed_dim))
        self.pos_drop = nn.Dropout(p=pos_drop_rate)

        transformers = []
        # stochastic depth decay rule
        dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]
        prev_dim = embed_dim
        for i in range(len(depth)):
            pool = None
            embed_dim = base_dims[i] * heads[i]
            if i > 0:
                pool = Pooling(
                    prev_dim,
                    embed_dim,
                    stride=2,
                )
            transformers += [Transformer(
                base_dims[i],
                depth[i],
                heads[i],
                mlp_ratio,
                pool=pool,
                proj_drop=proj_drop_rate,
                attn_drop=attn_drop_rate,
                drop_path_prob=dpr[i],
            )]
            prev_dim = embed_dim
            self.feature_info += [dict(num_chs=prev_dim, reduction=(stride - 1) * 2**i, module=f'transformers.{i}')]

        self.transformers = SequentialTuple(*transformers)
        self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
        self.num_features = self.embed_dim = embed_dim

        # Classifier head
        self.head_drop = nn.Dropout(drop_rate)
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        self.head_dist = None
        if distilled:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
        self.distilled_training = False  # must set this True to train w/ distillation token

        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    @torch.jit.ignore
    def set_distilled_training(self, enable=True):
        self.distilled_training = enable

    @torch.jit.ignore
    def set_grad_checkpointing(self, enable=True):
        assert not enable, 'gradient checkpointing not supported'

    def get_classifier(self):
        if self.head_dist is not None:
            return self.head, self.head_dist
        else:
            return self.head

    def reset_classifier(self, num_classes, global_pool=None):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
        if self.head_dist is not None:
            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = self.pos_drop(x + self.pos_embed)
        cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
        x, cls_tokens = self.transformers((x, cls_tokens))
        cls_tokens = self.norm(cls_tokens)
        return cls_tokens

    def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
        if self.head_dist is not None:
            assert self.global_pool == 'token'
            x, x_dist = x[:, 0], x[:, 1]
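            # token 0 is the class token, token 1 the distillation token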
x = self.head_drop(x)
            x_dist = self.head_drop(x_dist)
            if not pre_logits:
                x = self.head(x)
                x_dist = self.head_dist(x_dist)
            if self.distilled_training and self.training and not torch.jit.is_scripting():
                # only return separate classification predictions when training in distilled mode
                return x, x_dist
            else:
                # during standard train / finetune, average the two classifier predictions for inference
                return (x + x_dist) / 2
        else:
            if self.global_pool == 'token':
                x = x[:, 0]
            x = self.head_drop(x)
            if not pre_logits:
                x = self.head(x)
            return x

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def checkpoint_filter_fn(state_dict, model):
    """ preprocess checkpoints """
    out_dict = {}
    p_blocks = re.compile(r'pools\.(\d)\.')
    for k, v in state_dict.items():
        # FIXME need to update resize for PiT impl
        # if k == 'pos_embed' and v.shape != model.pos_embed.shape:
        #    # To resize pos embedding when using model at different size from pretrained weights
        #    v = resize_pos_embed(v, model.pos_embed)
        k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1)) + 1}.pool.', k)
        out_dict[k] = v
    return out_dict


def _create_pit(variant, pretrained=False, **kwargs):
    default_out_indices = tuple(range(3))
    out_indices = kwargs.pop('out_indices', default_out_indices)

    model = build_model_with_cfg(
        PoolingVisionTransformer,
        variant,
        pretrained,
        pretrained_filter_fn=checkpoint_filter_fn,
        feature_cfg=dict(feature_cls='hook', no_rewrite=True, out_indices=out_indices),
        **kwargs,
    )
    return model


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
        'first_conv': 'patch_embed.conv', 'classifier': 'head',
        **kwargs
    }


default_cfgs = generate_default_cfgs({
    # deit models (FB weights)
    'pit_ti_224.in1k': _cfg(hf_hub_id='timm/'),
    'pit_xs_224.in1k': _cfg(hf_hub_id='timm/'),
    'pit_s_224.in1k': _cfg(hf_hub_id='timm/'),
    'pit_b_224.in1k': _cfg(hf_hub_id='timm/'),
    'pit_ti_distilled_224.in1k': _cfg(
        hf_hub_id='timm/',
        classifier=('head', 'head_dist')),
    'pit_xs_distilled_224.in1k': _cfg(
        hf_hub_id='timm/',
        classifier=('head', 'head_dist')),
    'pit_s_distilled_224.in1k': _cfg(
        hf_hub_id='timm/',
        classifier=('head', 'head_dist')),
    'pit_b_distilled_224.in1k': _cfg(
        hf_hub_id='timm/',
        classifier=('head', 'head_dist')),
})


@register_model
def pit_b_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
    model_args = dict(
        patch_size=14,
        stride=7,
        base_dims=[64, 64, 64],
        depth=[3, 6, 4],
        heads=[4, 8, 16],
        mlp_ratio=4,
    )
    return _create_pit('pit_b_224', pretrained, **dict(model_args, **kwargs))


@register_model
def pit_s_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
    model_args = dict(
        patch_size=16,
        stride=8,
        base_dims=[48, 48, 48],
        depth=[2, 6, 4],
        heads=[3, 6, 12],
        mlp_ratio=4,
    )
    return _create_pit('pit_s_224', pretrained, **dict(model_args, **kwargs))


@register_model
def pit_xs_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
    model_args = dict(
        patch_size=16,
        stride=8,
        base_dims=[48, 48, 48],
        depth=[2, 6, 4],
        heads=[2, 4, 8],
        mlp_ratio=4,
    )
    return _create_pit('pit_xs_224', pretrained, **dict(model_args, **kwargs))


@register_model
def pit_ti_224(pretrained=False, **kwargs) -> PoolingVisionTransformer:
    model_args = dict(
        patch_size=16,
        stride=8,
        base_dims=[32, 32, 32],
        depth=[2, 6, 4],
        heads=[2, 4, 8],
        mlp_ratio=4,
    )
    return _create_pit('pit_ti_224', pretrained, **dict(model_args, **kwargs))


@register_model
def
pit_b_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=14, stride=7, base_dims=[64, 64, 64], depth=[3, 6, 4], heads=[4, 8, 16], mlp_ratio=4, distilled=True, ) return _create_pit('pit_b_distilled_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_s_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[3, 6, 12], mlp_ratio=4, distilled=True, ) return _create_pit('pit_s_distilled_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_xs_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=16, stride=8, base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4, distilled=True, ) return _create_pit('pit_xs_distilled_224', pretrained, **dict(model_args, **kwargs)) @register_model def pit_ti_distilled_224(pretrained=False, **kwargs) -> PoolingVisionTransformer: model_args = dict( patch_size=16, stride=8, base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4, distilled=True, ) return _create_pit('pit_ti_distilled_224', pretrained, **dict(model_args, **kwargs))
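# A minimal usage sketch, assuming timm's create_model API and the model names
# registered above; output shapes follow from the 224x224 default cfgs and the
# 1000-class heads. Guarded so it only runs when the module is executed directly
# (use `python -m ...` so the relative imports above resolve).
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('pit_s_224', pretrained=False).eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        print(model(x).shape)  # torch.Size([1, 1000])

    # Distilled variant: outside distilled training mode, forward_head averages the
    # cls and dist head outputs, so a single logits tensor is still returned.
    dist_model = timm.create_model('pit_ti_distilled_224', pretrained=False).eval()
    with torch.no_grad():
        print(dist_model(x).shape)  # torch.Size([1, 1000])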
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/pnasnet.py
""" pnasnet5large implementation grabbed from Cadene's pretrained models Additional credit to https://github.com/creafz https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/pnasnet.py """ from collections import OrderedDict from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.layers import ConvNormAct, create_conv2d, create_pool2d, create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['PNASNet5Large'] class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, padding=''): super(SeparableConv2d, self).__init__() self.depthwise_conv2d = create_conv2d( in_channels, in_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=in_channels) self.pointwise_conv2d = create_conv2d( in_channels, out_channels, kernel_size=1, padding=padding) def forward(self, x): x = self.depthwise_conv2d(x) x = self.pointwise_conv2d(x) return x class BranchSeparables(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, stem_cell=False, padding=''): super(BranchSeparables, self).__init__() middle_channels = out_channels if stem_cell else in_channels self.act_1 = nn.ReLU() self.separable_1 = SeparableConv2d( in_channels, middle_channels, kernel_size, stride=stride, padding=padding) self.bn_sep_1 = nn.BatchNorm2d(middle_channels, eps=0.001) self.act_2 = nn.ReLU() self.separable_2 = SeparableConv2d( middle_channels, out_channels, kernel_size, stride=1, padding=padding) self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = self.act_1(x) x = self.separable_1(x) x = self.bn_sep_1(x) x = self.act_2(x) x = self.separable_2(x) x = self.bn_sep_2(x) return x class ActConvBn(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=''): super(ActConvBn, self).__init__() self.act = nn.ReLU() self.conv = create_conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) self.bn = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = self.act(x) x = self.conv(x) x = self.bn(x) return x class FactorizedReduction(nn.Module): def __init__(self, in_channels, out_channels, padding=''): super(FactorizedReduction, self).__init__() self.act = nn.ReLU() self.path_1 = nn.Sequential(OrderedDict([ ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), ])) self.path_2 = nn.Sequential(OrderedDict([ ('pad', nn.ZeroPad2d((-1, 1, -1, 1))), # shift ('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False)), ('conv', create_conv2d(in_channels, out_channels // 2, kernel_size=1, padding=padding)), ])) self.final_path_bn = nn.BatchNorm2d(out_channels, eps=0.001) def forward(self, x): x = self.act(x) x_path1 = self.path_1(x) x_path2 = self.path_2(x) out = self.final_path_bn(torch.cat([x_path1, x_path2], 1)) return out class CellBase(nn.Module): def cell_forward(self, x_left, x_right): x_comb_iter_0_left = self.comb_iter_0_left(x_left) x_comb_iter_0_right = self.comb_iter_0_right(x_left) x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right x_comb_iter_1_left = self.comb_iter_1_left(x_right) x_comb_iter_1_right = self.comb_iter_1_right(x_right) x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right x_comb_iter_2_left = self.comb_iter_2_left(x_right) x_comb_iter_2_right = 
self.comb_iter_2_right(x_right) x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right x_comb_iter_3_left = self.comb_iter_3_left(x_comb_iter_2) x_comb_iter_3_right = self.comb_iter_3_right(x_right) x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right x_comb_iter_4_left = self.comb_iter_4_left(x_left) if self.comb_iter_4_right is not None: x_comb_iter_4_right = self.comb_iter_4_right(x_right) else: x_comb_iter_4_right = x_right x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right x_out = torch.cat([x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1) return x_out class CellStem0(CellBase): def __init__(self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type=''): super(CellStem0, self).__init__() self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables( in_chs_left, out_chs_left, kernel_size=5, stride=2, stem_cell=True, padding=pad_type) self.comb_iter_0_right = nn.Sequential(OrderedDict([ ('max_pool', create_pool2d('max', 3, stride=2, padding=pad_type)), ('conv', create_conv2d(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type)), ('bn', nn.BatchNorm2d(out_chs_left, eps=0.001)), ])) self.comb_iter_1_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=7, stride=2, padding=pad_type) self.comb_iter_1_right = create_pool2d('max', 3, stride=2, padding=pad_type) self.comb_iter_2_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=5, stride=2, padding=pad_type) self.comb_iter_2_right = BranchSeparables( out_chs_right, out_chs_right, kernel_size=3, stride=2, padding=pad_type) self.comb_iter_3_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=3, padding=pad_type) self.comb_iter_3_right = create_pool2d('max', 3, stride=2, padding=pad_type) self.comb_iter_4_left = BranchSeparables( in_chs_right, out_chs_right, kernel_size=3, stride=2, stem_cell=True, padding=pad_type) self.comb_iter_4_right = ActConvBn( out_chs_right, out_chs_right, kernel_size=1, stride=2, padding=pad_type) def forward(self, x_left): x_right = self.conv_1x1(x_left) x_out = self.cell_forward(x_left, x_right) return x_out class Cell(CellBase): def __init__( self, in_chs_left, out_chs_left, in_chs_right, out_chs_right, pad_type='', is_reduction=False, match_prev_layer_dims=False, ): super(Cell, self).__init__() # If `is_reduction` is set to `True` stride 2 is used for # convolution and pooling layers to reduce the spatial size of # the output of a cell approximately by a factor of 2. stride = 2 if is_reduction else 1 # If `match_prev_layer_dimensions` is set to `True` # `FactorizedReduction` is used to reduce the spatial size # of the left input of a cell approximately by a factor of 2. 
self.match_prev_layer_dimensions = match_prev_layer_dims if match_prev_layer_dims: self.conv_prev_1x1 = FactorizedReduction(in_chs_left, out_chs_left, padding=pad_type) else: self.conv_prev_1x1 = ActConvBn(in_chs_left, out_chs_left, kernel_size=1, padding=pad_type) self.conv_1x1 = ActConvBn(in_chs_right, out_chs_right, kernel_size=1, padding=pad_type) self.comb_iter_0_left = BranchSeparables( out_chs_left, out_chs_left, kernel_size=5, stride=stride, padding=pad_type) self.comb_iter_0_right = create_pool2d('max', 3, stride=stride, padding=pad_type) self.comb_iter_1_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=7, stride=stride, padding=pad_type) self.comb_iter_1_right = create_pool2d('max', 3, stride=stride, padding=pad_type) self.comb_iter_2_left = BranchSeparables( out_chs_right, out_chs_right, kernel_size=5, stride=stride, padding=pad_type) self.comb_iter_2_right = BranchSeparables( out_chs_right, out_chs_right, kernel_size=3, stride=stride, padding=pad_type) self.comb_iter_3_left = BranchSeparables(out_chs_right, out_chs_right, kernel_size=3) self.comb_iter_3_right = create_pool2d('max', 3, stride=stride, padding=pad_type) self.comb_iter_4_left = BranchSeparables( out_chs_left, out_chs_left, kernel_size=3, stride=stride, padding=pad_type) if is_reduction: self.comb_iter_4_right = ActConvBn( out_chs_right, out_chs_right, kernel_size=1, stride=stride, padding=pad_type) else: self.comb_iter_4_right = None def forward(self, x_left, x_right): x_left = self.conv_prev_1x1(x_left) x_right = self.conv_1x1(x_right) x_out = self.cell_forward(x_left, x_right) return x_out class PNASNet5Large(nn.Module): def __init__( self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0., global_pool='avg', pad_type='', ): super(PNASNet5Large, self).__init__() self.num_classes = num_classes self.num_features = 4320 assert output_stride == 32 self.conv_0 = ConvNormAct( in_chans, 96, kernel_size=3, stride=2, padding=0, norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.1), apply_act=False) self.cell_stem_0 = CellStem0( in_chs_left=96, out_chs_left=54, in_chs_right=96, out_chs_right=54, pad_type=pad_type) self.cell_stem_1 = Cell( in_chs_left=96, out_chs_left=108, in_chs_right=270, out_chs_right=108, pad_type=pad_type, match_prev_layer_dims=True, is_reduction=True) self.cell_0 = Cell( in_chs_left=270, out_chs_left=216, in_chs_right=540, out_chs_right=216, pad_type=pad_type, match_prev_layer_dims=True) self.cell_1 = Cell( in_chs_left=540, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) self.cell_2 = Cell( in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) self.cell_3 = Cell( in_chs_left=1080, out_chs_left=216, in_chs_right=1080, out_chs_right=216, pad_type=pad_type) self.cell_4 = Cell( in_chs_left=1080, out_chs_left=432, in_chs_right=1080, out_chs_right=432, pad_type=pad_type, is_reduction=True) self.cell_5 = Cell( in_chs_left=1080, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type, match_prev_layer_dims=True) self.cell_6 = Cell( in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) self.cell_7 = Cell( in_chs_left=2160, out_chs_left=432, in_chs_right=2160, out_chs_right=432, pad_type=pad_type) self.cell_8 = Cell( in_chs_left=2160, out_chs_left=864, in_chs_right=2160, out_chs_right=864, pad_type=pad_type, is_reduction=True) self.cell_9 = Cell( in_chs_left=2160, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type, 
match_prev_layer_dims=True) self.cell_10 = Cell( in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) self.cell_11 = Cell( in_chs_left=4320, out_chs_left=864, in_chs_right=4320, out_chs_right=864, pad_type=pad_type) self.act = nn.ReLU() self.feature_info = [ dict(num_chs=96, reduction=2, module='conv_0'), dict(num_chs=270, reduction=4, module='cell_stem_1.conv_1x1.act'), dict(num_chs=1080, reduction=8, module='cell_4.conv_1x1.act'), dict(num_chs=2160, reduction=16, module='cell_8.conv_1x1.act'), dict(num_chs=4320, reduction=32, module='act'), ] self.global_pool, self.head_drop, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate) @torch.jit.ignore def group_matcher(self, coarse=False): return dict(stem=r'^conv_0|cell_stem_[01]', blocks=r'^cell_(\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.last_linear def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x_conv_0 = self.conv_0(x) x_stem_0 = self.cell_stem_0(x_conv_0) x_stem_1 = self.cell_stem_1(x_conv_0, x_stem_0) x_cell_0 = self.cell_0(x_stem_0, x_stem_1) x_cell_1 = self.cell_1(x_stem_1, x_cell_0) x_cell_2 = self.cell_2(x_cell_0, x_cell_1) x_cell_3 = self.cell_3(x_cell_1, x_cell_2) x_cell_4 = self.cell_4(x_cell_2, x_cell_3) x_cell_5 = self.cell_5(x_cell_3, x_cell_4) x_cell_6 = self.cell_6(x_cell_4, x_cell_5) x_cell_7 = self.cell_7(x_cell_5, x_cell_6) x_cell_8 = self.cell_8(x_cell_6, x_cell_7) x_cell_9 = self.cell_9(x_cell_7, x_cell_8) x_cell_10 = self.cell_10(x_cell_8, x_cell_9) x_cell_11 = self.cell_11(x_cell_9, x_cell_10) x = self.act(x_cell_11) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_pnasnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( PNASNet5Large, variant, pretrained, feature_cfg=dict(feature_cls='hook', no_rewrite=True), # not possible to re-write this model **kwargs, ) default_cfgs = generate_default_cfgs({ 'pnasnet5large.tf_in1k': { 'hf_hub_id': 'timm/', 'input_size': (3, 331, 331), 'pool_size': (11, 11), 'crop_pct': 0.911, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'num_classes': 1000, 'first_conv': 'conv_0.conv', 'classifier': 'last_linear', }, }) @register_model def pnasnet5large(pretrained=False, **kwargs) -> PNASNet5Large: r"""PNASNet-5 model architecture from the `"Progressive Neural Architecture Search" <https://arxiv.org/abs/1712.00559>`_ paper. """ model_kwargs = dict(pad_type='same', **kwargs) return _create_pnasnet('pnasnet5large', pretrained, **model_kwargs)
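# A minimal feature-extraction sketch, assuming timm's create_model API and the
# 'pnasnet5large' name registered above; features_only relies on the hook-based
# feature_cfg set in _create_pnasnet, and the 331x331 input matches the default cfg.
# Guarded so it only runs when the module is executed directly via `python -m ...`.
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('pnasnet5large', pretrained=False, features_only=True).eval()
    x = torch.randn(1, 3, 331, 331)
    with torch.no_grad():
        features = model(x)
    # channel counts should line up with the feature_info entries above
    # (96, 270, 1080, 2160, 4320) at reductions 2, 4, 8, 16, 32
    for chs, feat in zip(model.feature_info.channels(), features):
        print(chs, tuple(feat.shape))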
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/pvt_v2.py
""" Pyramid Vision Transformer v2 @misc{wang2021pvtv2, title={PVTv2: Improved Baselines with Pyramid Vision Transformer}, author={Wenhai Wang and Enze Xie and Xiang Li and Deng-Ping Fan and Kaitao Song and Ding Liang and Tong Lu and Ping Luo and Ling Shao}, year={2021}, eprint={2106.13797}, archivePrefix={arXiv}, primaryClass={cs.CV} } Based on Apache 2.0 licensed code at https://github.com/whai362/PVT Modifications and timm support by / Copyright 2022, Ross Wightman """ import math from typing import Tuple, List, Callable, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, to_2tuple, to_ntuple, trunc_normal_, LayerNorm, use_fused_attn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['PyramidVisionTransformerV2'] class MlpWithDepthwiseConv(nn.Module): def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., extra_relu=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.relu = nn.ReLU() if extra_relu else nn.Identity() self.dwconv = nn.Conv2d(hidden_features, hidden_features, 3, 1, 1, bias=True, groups=hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x, feat_size: List[int]): x = self.fc1(x) B, N, C = x.shape x = x.transpose(1, 2).view(B, C, feat_size[0], feat_size[1]) x = self.relu(x) x = self.dwconv(x) x = x.flatten(2).transpose(1, 2) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__( self, dim, num_heads=8, sr_ratio=1, linear_attn=False, qkv_bias=True, attn_drop=0., proj_drop=0. ): super().__init__() assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
self.dim = dim self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, dim, bias=qkv_bias) self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) if not linear_attn: self.pool = None if sr_ratio > 1: self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) self.norm = nn.LayerNorm(dim) else: self.sr = None self.norm = None self.act = None else: self.pool = nn.AdaptiveAvgPool2d(7) self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1) self.norm = nn.LayerNorm(dim) self.act = nn.GELU() def forward(self, x, feat_size: List[int]): B, N, C = x.shape H, W = feat_size q = self.q(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) if self.pool is not None: x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.sr(self.pool(x)).reshape(B, C, -1).permute(0, 2, 1) x = self.norm(x) x = self.act(x) kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) else: if self.sr is not None: x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) x = self.norm(x) kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) else: kv = self.kv(x).reshape(B, -1, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) k, v = kv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., sr_ratio=1, linear_attn=False, qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, sr_ratio=sr_ratio, linear_attn=linear_attn, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = MlpWithDepthwiseConv( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, extra_relu=linear_attn, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x, feat_size: List[int]): x = x + self.drop_path1(self.attn(self.norm1(x), feat_size)) x = x + self.drop_path2(self.mlp(self.norm2(x), feat_size)) return x class OverlapPatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768): super().__init__() patch_size = to_2tuple(patch_size) assert max(patch_size) > stride, "Set larger patch_size than stride" self.patch_size = patch_size self.proj = nn.Conv2d( in_chans, embed_dim, patch_size, stride=stride, padding=(patch_size[0] // 2, patch_size[1] // 2)) self.norm = nn.LayerNorm(embed_dim) def forward(self, x): x = self.proj(x) x = x.permute(0, 2, 3, 1) x = self.norm(x) return x class PyramidVisionTransformerStage(nn.Module): def __init__( self, dim: int, dim_out: int, depth: int, downsample: bool = True, num_heads: int = 8, sr_ratio: int = 1, linear_attn: bool = False, mlp_ratio: float = 4.0, qkv_bias: bool = True, proj_drop: float = 0., attn_drop: float = 0., drop_path: Union[List[float], float] = 0.0, norm_layer: Callable = LayerNorm, ): super().__init__() self.grad_checkpointing = False if downsample: self.downsample = OverlapPatchEmbed( patch_size=3, stride=2, in_chans=dim, embed_dim=dim_out, ) else: assert dim == dim_out self.downsample = None self.blocks = nn.ModuleList([Block( dim=dim_out, num_heads=num_heads, sr_ratio=sr_ratio, linear_attn=linear_attn, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer, ) for i in range(depth)]) self.norm = norm_layer(dim_out) def forward(self, x): # x is either B, C, H, W (if downsample) or B, H, W, C if not if self.downsample is not None: # input to downsample is B, C, H, W x = self.downsample(x) # output B, H, W, C B, H, W, C = x.shape feat_size = (H, W) x = x.reshape(B, -1, C) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint.checkpoint(blk, x, feat_size) else: x = blk(x, feat_size) x = self.norm(x) x = x.reshape(B, feat_size[0], feat_size[1], -1).permute(0, 3, 1, 2).contiguous() return x class PyramidVisionTransformerV2(nn.Module): def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', depths=(3, 4, 6, 3), embed_dims=(64, 128, 256, 512), num_heads=(1, 2, 4, 8), sr_ratios=(8, 4, 2, 1), mlp_ratios=(8., 8., 4., 4.), qkv_bias=True, linear=False, drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=LayerNorm, ): super().__init__() self.num_classes = num_classes assert global_pool in ('avg', '') self.global_pool = global_pool self.depths = depths num_stages = len(depths) mlp_ratios = to_ntuple(num_stages)(mlp_ratios) num_heads = to_ntuple(num_stages)(num_heads) sr_ratios = to_ntuple(num_stages)(sr_ratios) assert(len(embed_dims)) == num_stages self.feature_info = [] self.patch_embed = OverlapPatchEmbed( patch_size=7, stride=4, in_chans=in_chans, embed_dim=embed_dims[0], ) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] cur = 0 prev_dim = embed_dims[0] stages = [] for i in range(num_stages): stages += [PyramidVisionTransformerStage( dim=prev_dim, dim_out=embed_dims[i], depth=depths[i], downsample=i > 0, num_heads=num_heads[i], sr_ratio=sr_ratios[i], mlp_ratio=mlp_ratios[i], linear_attn=linear, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, )] prev_dim = embed_dims[i] cur += depths[i] 
self.feature_info += [dict(num_chs=prev_dim, reduction=4 * 2**i, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) # classification head self.num_features = embed_dims[-1] self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() def freeze_patch_emb(self): self.patch_embed.requires_grad = False @torch.jit.ignore def no_weight_decay(self): return {} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embed', # stem and embed blocks=r'^stages\.(\d+)' ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('avg', '') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x.mean(dim=(-1, -2)) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _checkpoint_filter_fn(state_dict, model): """ Remap original checkpoints -> timm """ if 'patch_embed.proj.weight' in state_dict: return state_dict # non-original checkpoint, no remapping needed out_dict = {} import re for k, v in state_dict.items(): if k.startswith('patch_embed'): k = k.replace('patch_embed1', 'patch_embed') k = k.replace('patch_embed2', 'stages.1.downsample') k = k.replace('patch_embed3', 'stages.2.downsample') k = k.replace('patch_embed4', 'stages.3.downsample') k = k.replace('dwconv.dwconv', 'dwconv') k = re.sub(r'block(\d+).(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.blocks.{x.group(2)}', k) k = re.sub(r'^norm(\d+)', lambda x: f'stages.{int(x.group(1)) - 1}.norm', k) out_dict[k] = v return out_dict def _create_pvt2(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(4)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( PyramidVisionTransformerV2, variant, pretrained, pretrained_filter_fn=_checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', 'fixed_input_size': False, **kwargs } default_cfgs = generate_default_cfgs({ 'pvt_v2_b0.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b1.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b2.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b3.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b4.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b5.in1k': _cfg(hf_hub_id='timm/'), 'pvt_v2_b2_li.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def 
pvt_v2_b0(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(2, 2, 2, 2), embed_dims=(32, 64, 160, 256), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b1(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(2, 2, 2, 2), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b2(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b3(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 4, 18, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b3', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b4(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict(depths=(3, 8, 27, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8)) return _create_pvt2('pvt_v2_b4', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b5(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict( depths=(3, 6, 40, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), mlp_ratios=(4, 4, 4, 4)) return _create_pvt2('pvt_v2_b5', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def pvt_v2_b2_li(pretrained=False, **kwargs) -> PyramidVisionTransformerV2: model_args = dict( depths=(3, 4, 6, 3), embed_dims=(64, 128, 320, 512), num_heads=(1, 2, 5, 8), linear=True) return _create_pvt2('pvt_v2_b2_li', pretrained=pretrained, **dict(model_args, **kwargs))
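# A minimal usage sketch, assuming timm's create_model API and the pvt_v2_* names
# registered above; the default cfg sets fixed_input_size=False, so a second
# resolution is exercised to show the overlapping patch embed handles it.
# Guarded so it only runs when the module is executed directly via `python -m ...`.
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('pvt_v2_b0', pretrained=False).eval()
    with torch.no_grad():
        print(model(torch.randn(2, 3, 224, 224)).shape)  # torch.Size([2, 1000])
        print(model(torch.randn(2, 3, 256, 256)).shape)  # other input sizes also work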
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/registry.py
from ._registry import * import warnings warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", DeprecationWarning)
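# A minimal migration sketch (assumes timm's public API): the shim above simply
# re-exports timm.models._registry and adds the DeprecationWarning on import, so
#
#   from timm.models.registry import register_model   # old path, warns on import
#
# can be replaced one-for-one with
#
#   from timm.models import register_model            # preferred import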
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/regnet.py
"""RegNet X, Y, Z, and more Paper: `Designing Network Design Spaces` - https://arxiv.org/abs/2003.13678 Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py Paper: `Fast and Accurate Model Scaling` - https://arxiv.org/abs/2103.06877 Original Impl: None Based on original PyTorch impl linked above, but re-wrote to use my own blocks (adapted from ResNet here) and cleaned up with more descriptive variable names. Weights from original pycls impl have been modified: * first layer from BGR -> RGB as most PyTorch models are * removed training specific dict entries from checkpoints and keep model state_dict only * remap names to match the ones here Supports weight loading from torchvision and classy-vision (incl VISSL SEER) A number of custom timm model definitions additions including: * stochastic depth, gradient checkpointing, layer-decay, configurable dilation * a pre-activation 'V' variant * only known RegNet-Z model definitions with pretrained weights Hacked together by / Copyright 2020 Ross Wightman """ import math from dataclasses import dataclass, replace from functools import partial from typing import Optional, Union, Callable import numpy as np import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, AvgPool2dSame, ConvNormAct, SEModule, DropPath, GroupNormAct from timm.layers import get_act_layer, get_norm_act_layer, create_conv2d, make_divisible from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq, named_apply from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['RegNet', 'RegNetCfg'] # model_registry will add each entrypoint fn to this @dataclass class RegNetCfg: depth: int = 21 w0: int = 80 wa: float = 42.63 wm: float = 2.66 group_size: int = 24 bottle_ratio: float = 1. se_ratio: float = 0. group_min_ratio: float = 0. stem_width: int = 32 downsample: Optional[str] = 'conv1x1' linear_out: bool = False preact: bool = False num_features: int = 0 act_layer: Union[str, Callable] = 'relu' norm_layer: Union[str, Callable] = 'batchnorm' def quantize_float(f, q): """Converts a float to the closest non-zero int divisible by q.""" return int(round(f / q) * q) def adjust_widths_groups_comp(widths, bottle_ratios, groups, min_ratio=0.): """Adjusts the compatibility of widths and groups.""" bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)] groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)] if min_ratio: # torchvision uses a different rounding scheme for ensuring bottleneck widths divisible by group widths bottleneck_widths = [make_divisible(w_bot, g, min_ratio) for w_bot, g in zip(bottleneck_widths, groups)] else: bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)] widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)] return widths, groups def generate_regnet(width_slope, width_initial, width_mult, depth, group_size, quant=8): """Generates per block widths from RegNet parameters.""" assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and width_initial % quant == 0 # TODO dWr scaling? 
# depth = int(depth * (scale ** 0.1)) # width_scale = scale ** 0.4 # dWr scale, exp 0.8 / 2, applied to both group and layer widths widths_cont = np.arange(depth) * width_slope + width_initial width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult)) widths = np.round(np.divide(width_initial * np.power(width_mult, width_exps), quant)) * quant num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1 groups = np.array([group_size for _ in range(num_stages)]) return widths.astype(int).tolist(), num_stages, groups.astype(int).tolist() def downsample_conv( in_chs, out_chs, kernel_size=1, stride=1, dilation=1, norm_layer=None, preact=False, ): norm_layer = norm_layer or nn.BatchNorm2d kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size dilation = dilation if kernel_size > 1 else 1 if preact: return create_conv2d( in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, ) else: return ConvNormAct( in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, norm_layer=norm_layer, apply_act=False, ) def downsample_avg( in_chs, out_chs, kernel_size=1, stride=1, dilation=1, norm_layer=None, preact=False, ): """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.""" norm_layer = norm_layer or nn.BatchNorm2d avg_stride = stride if dilation == 1 else 1 pool = nn.Identity() if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) if preact: conv = create_conv2d(in_chs, out_chs, 1, stride=1) else: conv = ConvNormAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, apply_act=False) return nn.Sequential(*[pool, conv]) def create_shortcut( downsample_type, in_chs, out_chs, kernel_size, stride, dilation=(1, 1), norm_layer=None, preact=False, ): assert downsample_type in ('avg', 'conv1x1', '', None) if in_chs != out_chs or stride != 1 or dilation[0] != dilation[1]: dargs = dict(stride=stride, dilation=dilation[0], norm_layer=norm_layer, preact=preact) if not downsample_type: return None # no shortcut, no downsample elif downsample_type == 'avg': return downsample_avg(in_chs, out_chs, **dargs) else: return downsample_conv(in_chs, out_chs, kernel_size=kernel_size, **dargs) else: return nn.Identity() # identity shortcut (no downsample) class Bottleneck(nn.Module): """ RegNet Bottleneck This is almost exactly the same as a ResNet Bottlneck. The main difference is the SE block is moved from after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels. 
""" def __init__( self, in_chs, out_chs, stride=1, dilation=(1, 1), bottle_ratio=1, group_size=1, se_ratio=0.25, downsample='conv1x1', linear_out=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_block=None, drop_path_rate=0., ): super(Bottleneck, self).__init__() act_layer = get_act_layer(act_layer) bottleneck_chs = int(round(out_chs * bottle_ratio)) groups = bottleneck_chs // group_size cargs = dict(act_layer=act_layer, norm_layer=norm_layer) self.conv1 = ConvNormAct(in_chs, bottleneck_chs, kernel_size=1, **cargs) self.conv2 = ConvNormAct( bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation[0], groups=groups, drop_layer=drop_block, **cargs, ) if se_ratio: se_channels = int(round(in_chs * se_ratio)) self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer) else: self.se = nn.Identity() self.conv3 = ConvNormAct(bottleneck_chs, out_chs, kernel_size=1, apply_act=False, **cargs) self.act3 = nn.Identity() if linear_out else act_layer() self.downsample = create_shortcut( downsample, in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation, norm_layer=norm_layer, ) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def zero_init_last(self): nn.init.zeros_(self.conv3.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) x = self.se(x) x = self.conv3(x) if self.downsample is not None: # NOTE stuck with downsample as the attr name due to weight compatibility # now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity() x = self.drop_path(x) + self.downsample(shortcut) x = self.act3(x) return x class PreBottleneck(nn.Module): """ RegNet Bottleneck This is almost exactly the same as a ResNet Bottlneck. The main difference is the SE block is moved from after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels. 
""" def __init__( self, in_chs, out_chs, stride=1, dilation=(1, 1), bottle_ratio=1, group_size=1, se_ratio=0.25, downsample='conv1x1', linear_out=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_block=None, drop_path_rate=0., ): super(PreBottleneck, self).__init__() norm_act_layer = get_norm_act_layer(norm_layer, act_layer) bottleneck_chs = int(round(out_chs * bottle_ratio)) groups = bottleneck_chs // group_size self.norm1 = norm_act_layer(in_chs) self.conv1 = create_conv2d(in_chs, bottleneck_chs, kernel_size=1) self.norm2 = norm_act_layer(bottleneck_chs) self.conv2 = create_conv2d( bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation[0], groups=groups, ) if se_ratio: se_channels = int(round(in_chs * se_ratio)) self.se = SEModule(bottleneck_chs, rd_channels=se_channels, act_layer=act_layer) else: self.se = nn.Identity() self.norm3 = norm_act_layer(bottleneck_chs) self.conv3 = create_conv2d(bottleneck_chs, out_chs, kernel_size=1) self.downsample = create_shortcut( downsample, in_chs, out_chs, kernel_size=1, stride=stride, dilation=dilation, preact=True, ) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def zero_init_last(self): pass def forward(self, x): x = self.norm1(x) shortcut = x x = self.conv1(x) x = self.norm2(x) x = self.conv2(x) x = self.se(x) x = self.norm3(x) x = self.conv3(x) if self.downsample is not None: # NOTE stuck with downsample as the attr name due to weight compatibility # now represents the shortcut, no shortcut if None, and non-downsample shortcut == nn.Identity() x = self.drop_path(x) + self.downsample(shortcut) return x class RegStage(nn.Module): """Stage (sequence of blocks w/ the same output shape).""" def __init__( self, depth, in_chs, out_chs, stride, dilation, drop_path_rates=None, block_fn=Bottleneck, **block_kwargs, ): super(RegStage, self).__init__() self.grad_checkpointing = False first_dilation = 1 if dilation in (1, 2) else 2 for i in range(depth): block_stride = stride if i == 0 else 1 block_in_chs = in_chs if i == 0 else out_chs block_dilation = (first_dilation, dilation) dpr = drop_path_rates[i] if drop_path_rates is not None else 0. name = "b{}".format(i + 1) self.add_module( name, block_fn( block_in_chs, out_chs, stride=block_stride, dilation=block_dilation, drop_path_rate=dpr, **block_kwargs, ) ) first_dilation = dilation def forward(self, x): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.children(), x) else: for block in self.children(): x = block(x) return x class RegNet(nn.Module): """RegNet-X, Y, and Z Models Paper: https://arxiv.org/abs/2003.13678 Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py """ def __init__( self, cfg: RegNetCfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0., drop_path_rate=0., zero_init_last=True, **kwargs, ): """ Args: cfg (RegNetCfg): Model architecture configuration in_chans (int): Number of input channels (default: 3) num_classes (int): Number of classifier classes (default: 1000) output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32) global_pool (str): Global pooling type (default: 'avg') drop_rate (float): Dropout rate (default: 0.) drop_path_rate (float): Stochastic depth drop-path rate (default: 0.) 
zero_init_last (bool): Zero-init last weight of residual path kwargs (dict): Extra kwargs overlayed onto cfg """ super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate assert output_stride in (8, 16, 32) cfg = replace(cfg, **kwargs) # update cfg with extra passed kwargs # Construct the stem stem_width = cfg.stem_width na_args = dict(act_layer=cfg.act_layer, norm_layer=cfg.norm_layer) if cfg.preact: self.stem = create_conv2d(in_chans, stem_width, 3, stride=2) else: self.stem = ConvNormAct(in_chans, stem_width, 3, stride=2, **na_args) self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')] # Construct the stages prev_width = stem_width curr_stride = 2 per_stage_args, common_args = self._get_stage_args( cfg, output_stride=output_stride, drop_path_rate=drop_path_rate, ) assert len(per_stage_args) == 4 block_fn = PreBottleneck if cfg.preact else Bottleneck for i, stage_args in enumerate(per_stage_args): stage_name = "s{}".format(i + 1) self.add_module( stage_name, RegStage( in_chs=prev_width, block_fn=block_fn, **stage_args, **common_args, ) ) prev_width = stage_args['out_chs'] curr_stride *= stage_args['stride'] self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)] # Construct the head if cfg.num_features: self.final_conv = ConvNormAct(prev_width, cfg.num_features, kernel_size=1, **na_args) self.num_features = cfg.num_features else: final_act = cfg.linear_out or cfg.preact self.final_conv = get_act_layer(cfg.act_layer)() if final_act else nn.Identity() self.num_features = prev_width self.head = ClassifierHead( in_features=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate, ) named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) def _get_stage_args(self, cfg: RegNetCfg, default_stride=2, output_stride=32, drop_path_rate=0.): # Generate RegNet ws per block widths, num_stages, stage_gs = generate_regnet(cfg.wa, cfg.w0, cfg.wm, cfg.depth, cfg.group_size) # Convert to per stage format stage_widths, stage_depths = np.unique(widths, return_counts=True) stage_br = [cfg.bottle_ratio for _ in range(num_stages)] stage_strides = [] stage_dilations = [] net_stride = 2 dilation = 1 for _ in range(num_stages): if net_stride >= output_stride: dilation *= default_stride stride = 1 else: stride = default_stride net_stride *= stride stage_strides.append(stride) stage_dilations.append(dilation) stage_dpr = np.split(np.linspace(0, drop_path_rate, sum(stage_depths)), np.cumsum(stage_depths[:-1])) # Adjust the compatibility of ws and gws stage_widths, stage_gs = adjust_widths_groups_comp( stage_widths, stage_br, stage_gs, min_ratio=cfg.group_min_ratio) arg_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_size', 'drop_path_rates'] per_stage_args = [ dict(zip(arg_names, params)) for params in zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_br, stage_gs, stage_dpr) ] common_args = dict( downsample=cfg.downsample, se_ratio=cfg.se_ratio, linear_out=cfg.linear_out, act_layer=cfg.act_layer, norm_layer=cfg.norm_layer, ) return per_stage_args, common_args @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^s(\d+)' if coarse else r'^s(\d+)\.b(\d+)', ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in list(self.children())[1:-1]: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): 
self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.s1(x) x = self.s2(x) x = self.s3(x) x = self.s4(x) x = self.final_conv(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name='', zero_init_last=False): if isinstance(module, nn.Conv2d): fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels fan_out //= module.groups module.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Linear): nn.init.normal_(module.weight, mean=0.0, std=0.01) if module.bias is not None: nn.init.zeros_(module.bias) elif zero_init_last and hasattr(module, 'zero_init_last'): module.zero_init_last() def _filter_fn(state_dict): state_dict = state_dict.get('model', state_dict) replaces = [ ('f.a.0', 'conv1.conv'), ('f.a.1', 'conv1.bn'), ('f.b.0', 'conv2.conv'), ('f.b.1', 'conv2.bn'), ('f.final_bn', 'conv3.bn'), ('f.se.excitation.0', 'se.fc1'), ('f.se.excitation.2', 'se.fc2'), ('f.se', 'se'), ('f.c.0', 'conv3.conv'), ('f.c.1', 'conv3.bn'), ('f.c', 'conv3.conv'), ('proj.0', 'downsample.conv'), ('proj.1', 'downsample.bn'), ('proj', 'downsample.conv'), ] if 'classy_state_dict' in state_dict: # classy-vision & vissl (SEER) weights import re state_dict = state_dict['classy_state_dict']['base_model']['model'] out = {} for k, v in state_dict['trunk'].items(): k = k.replace('_feature_blocks.conv1.stem.0', 'stem.conv') k = k.replace('_feature_blocks.conv1.stem.1', 'stem.bn') k = re.sub( r'^_feature_blocks.res\d.block(\d)-(\d+)', lambda x: f's{int(x.group(1))}.b{int(x.group(2)) + 1}', k) k = re.sub(r's(\d)\.b(\d+)\.bn', r's\1.b\2.downsample.bn', k) for s, r in replaces: k = k.replace(s, r) out[k] = v for k, v in state_dict['heads'].items(): if 'projection_head' in k or 'prototypes' in k: continue k = k.replace('0.clf.0', 'head.fc') out[k] = v return out if 'stem.0.weight' in state_dict: # torchvision weights import re out = {} for k, v in state_dict.items(): k = k.replace('stem.0', 'stem.conv') k = k.replace('stem.1', 'stem.bn') k = re.sub( r'trunk_output.block(\d)\.block(\d+)\-(\d+)', lambda x: f's{int(x.group(1))}.b{int(x.group(3)) + 1}', k) for s, r in replaces: k = k.replace(s, r) k = k.replace('fc.', 'head.fc.') out[k] = v return out return state_dict # Model FLOPS = three trailing digits * 10^8 model_cfgs = dict( # RegNet-X regnetx_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13), regnetx_004=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22), regnetx_004_tv=RegNetCfg(w0=24, wa=24.48, wm=2.54, group_size=16, depth=22, group_min_ratio=0.9), regnetx_006=RegNetCfg(w0=48, wa=36.97, wm=2.24, group_size=24, depth=16), regnetx_008=RegNetCfg(w0=56, wa=35.73, wm=2.28, group_size=16, depth=16), regnetx_016=RegNetCfg(w0=80, wa=34.01, wm=2.25, group_size=24, depth=18), regnetx_032=RegNetCfg(w0=88, wa=26.31, wm=2.25, group_size=48, depth=25), regnetx_040=RegNetCfg(w0=96, wa=38.65, wm=2.43, group_size=40, depth=23), regnetx_064=RegNetCfg(w0=184, wa=60.83, wm=2.07, group_size=56, depth=17), regnetx_080=RegNetCfg(w0=80, wa=49.56, wm=2.88, group_size=120, depth=23), regnetx_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19), regnetx_160=RegNetCfg(w0=216, wa=55.59, wm=2.1, group_size=128, depth=22), regnetx_320=RegNetCfg(w0=320, wa=69.86, wm=2.0, group_size=168, depth=23), # 
RegNet-Y regnety_002=RegNetCfg(w0=24, wa=36.44, wm=2.49, group_size=8, depth=13, se_ratio=0.25), regnety_004=RegNetCfg(w0=48, wa=27.89, wm=2.09, group_size=8, depth=16, se_ratio=0.25), regnety_006=RegNetCfg(w0=48, wa=32.54, wm=2.32, group_size=16, depth=15, se_ratio=0.25), regnety_008=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25), regnety_008_tv=RegNetCfg(w0=56, wa=38.84, wm=2.4, group_size=16, depth=14, se_ratio=0.25, group_min_ratio=0.9), regnety_016=RegNetCfg(w0=48, wa=20.71, wm=2.65, group_size=24, depth=27, se_ratio=0.25), regnety_032=RegNetCfg(w0=80, wa=42.63, wm=2.66, group_size=24, depth=21, se_ratio=0.25), regnety_040=RegNetCfg(w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25), regnety_064=RegNetCfg(w0=112, wa=33.22, wm=2.27, group_size=72, depth=25, se_ratio=0.25), regnety_080=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25), regnety_080_tv=RegNetCfg(w0=192, wa=76.82, wm=2.19, group_size=56, depth=17, se_ratio=0.25, group_min_ratio=0.9), regnety_120=RegNetCfg(w0=168, wa=73.36, wm=2.37, group_size=112, depth=19, se_ratio=0.25), regnety_160=RegNetCfg(w0=200, wa=106.23, wm=2.48, group_size=112, depth=18, se_ratio=0.25), regnety_320=RegNetCfg(w0=232, wa=115.89, wm=2.53, group_size=232, depth=20, se_ratio=0.25), regnety_640=RegNetCfg(w0=352, wa=147.48, wm=2.4, group_size=328, depth=20, se_ratio=0.25), regnety_1280=RegNetCfg(w0=456, wa=160.83, wm=2.52, group_size=264, depth=27, se_ratio=0.25), regnety_2560=RegNetCfg(w0=640, wa=230.83, wm=2.53, group_size=373, depth=27, se_ratio=0.25), #regnety_2560=RegNetCfg(w0=640, wa=124.47, wm=2.04, group_size=848, depth=27, se_ratio=0.25), # Experimental regnety_040_sgn=RegNetCfg( w0=96, wa=31.41, wm=2.24, group_size=64, depth=22, se_ratio=0.25, act_layer='silu', norm_layer=partial(GroupNormAct, group_size=16)), # regnetv = 'preact regnet y' regnetv_040=RegNetCfg( depth=22, w0=96, wa=31.41, wm=2.24, group_size=64, se_ratio=0.25, preact=True, act_layer='silu'), regnetv_064=RegNetCfg( depth=25, w0=112, wa=33.22, wm=2.27, group_size=72, se_ratio=0.25, preact=True, act_layer='silu', downsample='avg'), # RegNet-Z (unverified) regnetz_005=RegNetCfg( depth=21, w0=16, wa=10.7, wm=2.51, group_size=4, bottle_ratio=4.0, se_ratio=0.25, downsample=None, linear_out=True, num_features=1024, act_layer='silu', ), regnetz_040=RegNetCfg( depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25, downsample=None, linear_out=True, num_features=0, act_layer='silu', ), regnetz_040_h=RegNetCfg( depth=28, w0=48, wa=14.5, wm=2.226, group_size=8, bottle_ratio=4.0, se_ratio=0.25, downsample=None, linear_out=True, num_features=1536, act_layer='silu', ), ) def _create_regnet(variant, pretrained, **kwargs): return build_model_with_cfg( RegNet, variant, pretrained, model_cfg=model_cfgs[variant], pretrained_filter_fn=_filter_fn, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'test_input_size': (3, 288, 288), 'crop_pct': 0.95, 'test_crop_pct': 1.0, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs } def _cfgpyc(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'license': 'mit', 'origin_url': 
'https://github.com/facebookresearch/pycls', **kwargs } def _cfgtv2(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.965, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'license': 'bsd-3-clause', 'origin_url': 'https://github.com/pytorch/vision', **kwargs } default_cfgs = generate_default_cfgs({ # timm trained models 'regnety_032.ra_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth'), 'regnety_040.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_040_ra3-670e1166.pth'), 'regnety_064.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_064_ra3-aa26dc7d.pth'), 'regnety_080.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnety_080_ra3-1fdc4344.pth'), 'regnety_120.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'regnety_160.sw_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), 'regnety_160.lion_in12k_ft_in1k': _cfg(hf_hub_id='timm/'), # timm in12k pretrain 'regnety_120.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), 'regnety_160.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821), # timm custom arch (v and z guess) + trained models 'regnety_040_sgn.untrained': _cfg(url=''), 'regnetv_040.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_040_ra3-c248f51f.pth', first_conv='stem'), 'regnetv_064.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetv_064_ra3-530616c2.pth', first_conv='stem'), 'regnetz_005.untrained': _cfg(url=''), 'regnetz_040.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040_ra3-9007edf5.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)), 'regnetz_040_h.ra3_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-tpu-weights/regnetz_040h_ra3-f594343b.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320)), # used in DeiT for distillation (from Facebook DeiT GitHub repository) 'regnety_160.deit_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/deit/regnety_160-a5fe301d.pth'), 'regnetx_004_tv.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_400mf-62229a5f.pth'), 'regnetx_008.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_800mf-94a99ebd.pth'), 'regnetx_016.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_1_6gf-a12f2b72.pth'), 'regnetx_032.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_3_2gf-7071aa85.pth'), 'regnetx_080.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_8gf-2b70d774.pth'), 'regnetx_160.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_x_16gf-ba3796d7.pth'), 'regnetx_320.tv2_in1k': _cfgtv2( hf_hub_id='timm/', 
url='https://download.pytorch.org/models/regnet_x_32gf-6eb8fdc6.pth'), 'regnety_004.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_400mf-e6988f5f.pth'), 'regnety_008_tv.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_800mf-58fc7688.pth'), 'regnety_016.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_1_6gf-0d7bc02a.pth'), 'regnety_032.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_3_2gf-9180c971.pth'), 'regnety_080_tv.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_8gf-dc2b1b54.pth'), 'regnety_160.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_16gf-3e4a00f9.pth'), 'regnety_320.tv2_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_32gf-8db6d4b5.pth'), 'regnety_160.swag_ft_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_16gf_swag-43afe44d.pth', license='cc-by-nc-4.0', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_320.swag_ft_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_32gf_swag-04fdfa75.pth', license='cc-by-nc-4.0', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_1280.swag_ft_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_128gf_swag-c8ce3e52.pth', license='cc-by-nc-4.0', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_160.swag_lc_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_16gf_lc_swag-f3ec0043.pth', license='cc-by-nc-4.0'), 'regnety_320.swag_lc_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_32gf_lc_swag-e1583746.pth', license='cc-by-nc-4.0'), 'regnety_1280.swag_lc_in1k': _cfgtv2( hf_hub_id='timm/', url='https://download.pytorch.org/models/regnet_y_128gf_lc_swag-cbe8ce12.pth', license='cc-by-nc-4.0'), 'regnety_320.seer_ft_in1k': _cfgtv2( hf_hub_id='timm/', license='other', origin_url='https://github.com/facebookresearch/vissl', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_640.seer_ft_in1k': _cfgtv2( hf_hub_id='timm/', license='other', origin_url='https://github.com/facebookresearch/vissl', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_1280.seer_ft_in1k': _cfgtv2( hf_hub_id='timm/', license='other', origin_url='https://github.com/facebookresearch/vissl', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_2560.seer_ft_in1k': _cfgtv2( hf_hub_id='timm/', license='other', origin_url='https://github.com/facebookresearch/vissl', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet256_finetuned_in1k_model_final_checkpoint_phase38.torch', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'regnety_320.seer': _cfgtv2( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', num_classes=0, 
license='other', origin_url='https://github.com/facebookresearch/vissl'), 'regnety_640.seer': _cfgtv2( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), 'regnety_1280.seer': _cfgtv2( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), # FIXME invalid weight <-> model match, mistake on their end #'regnety_2560.seer': _cfgtv2( # url='https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_cosine_rg256gf_noBNhead_wd1e5_fairstore_bs16_node64_sinkhorn10_proto16k_apex_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', # num_classes=0, license='other', origin_url='https://github.com/facebookresearch/vissl'), 'regnetx_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnetx_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_002.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_004.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_006.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_008.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_016.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_032.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_040.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_064.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_080.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_120.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_160.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), 'regnety_320.pycls_in1k': _cfgpyc(hf_hub_id='timm/'), }) @register_model def regnetx_002(pretrained=False, **kwargs) -> RegNet: """RegNetX-200MF""" return _create_regnet('regnetx_002', pretrained, **kwargs) @register_model def regnetx_004(pretrained=False, **kwargs) -> RegNet: """RegNetX-400MF""" return _create_regnet('regnetx_004', pretrained, **kwargs) @register_model def regnetx_004_tv(pretrained=False, **kwargs) -> RegNet: """RegNetX-400MF w/ torchvision group rounding""" return _create_regnet('regnetx_004_tv', pretrained, **kwargs) @register_model def regnetx_006(pretrained=False, **kwargs) -> RegNet: """RegNetX-600MF""" return _create_regnet('regnetx_006', pretrained, **kwargs) @register_model def regnetx_008(pretrained=False, **kwargs) -> RegNet: """RegNetX-800MF""" return _create_regnet('regnetx_008', pretrained, **kwargs) @register_model def regnetx_016(pretrained=False, **kwargs) -> RegNet: """RegNetX-1.6GF""" return _create_regnet('regnetx_016', pretrained, **kwargs) @register_model def regnetx_032(pretrained=False, **kwargs) -> RegNet: """RegNetX-3.2GF""" return _create_regnet('regnetx_032', pretrained, **kwargs) @register_model def regnetx_040(pretrained=False, **kwargs) -> RegNet: """RegNetX-4.0GF""" return _create_regnet('regnetx_040', pretrained, **kwargs) 
@register_model def regnetx_064(pretrained=False, **kwargs) -> RegNet: """RegNetX-6.4GF""" return _create_regnet('regnetx_064', pretrained, **kwargs) @register_model def regnetx_080(pretrained=False, **kwargs) -> RegNet: """RegNetX-8.0GF""" return _create_regnet('regnetx_080', pretrained, **kwargs) @register_model def regnetx_120(pretrained=False, **kwargs) -> RegNet: """RegNetX-12GF""" return _create_regnet('regnetx_120', pretrained, **kwargs) @register_model def regnetx_160(pretrained=False, **kwargs) -> RegNet: """RegNetX-16GF""" return _create_regnet('regnetx_160', pretrained, **kwargs) @register_model def regnetx_320(pretrained=False, **kwargs) -> RegNet: """RegNetX-32GF""" return _create_regnet('regnetx_320', pretrained, **kwargs) @register_model def regnety_002(pretrained=False, **kwargs) -> RegNet: """RegNetY-200MF""" return _create_regnet('regnety_002', pretrained, **kwargs) @register_model def regnety_004(pretrained=False, **kwargs) -> RegNet: """RegNetY-400MF""" return _create_regnet('regnety_004', pretrained, **kwargs) @register_model def regnety_006(pretrained=False, **kwargs) -> RegNet: """RegNetY-600MF""" return _create_regnet('regnety_006', pretrained, **kwargs) @register_model def regnety_008(pretrained=False, **kwargs) -> RegNet: """RegNetY-800MF""" return _create_regnet('regnety_008', pretrained, **kwargs) @register_model def regnety_008_tv(pretrained=False, **kwargs) -> RegNet: """RegNetY-800MF w/ torchvision group rounding""" return _create_regnet('regnety_008_tv', pretrained, **kwargs) @register_model def regnety_016(pretrained=False, **kwargs) -> RegNet: """RegNetY-1.6GF""" return _create_regnet('regnety_016', pretrained, **kwargs) @register_model def regnety_032(pretrained=False, **kwargs) -> RegNet: """RegNetY-3.2GF""" return _create_regnet('regnety_032', pretrained, **kwargs) @register_model def regnety_040(pretrained=False, **kwargs) -> RegNet: """RegNetY-4.0GF""" return _create_regnet('regnety_040', pretrained, **kwargs) @register_model def regnety_064(pretrained=False, **kwargs) -> RegNet: """RegNetY-6.4GF""" return _create_regnet('regnety_064', pretrained, **kwargs) @register_model def regnety_080(pretrained=False, **kwargs) -> RegNet: """RegNetY-8.0GF""" return _create_regnet('regnety_080', pretrained, **kwargs) @register_model def regnety_080_tv(pretrained=False, **kwargs) -> RegNet: """RegNetY-8.0GF w/ torchvision group rounding""" return _create_regnet('regnety_080_tv', pretrained, **kwargs) @register_model def regnety_120(pretrained=False, **kwargs) -> RegNet: """RegNetY-12GF""" return _create_regnet('regnety_120', pretrained, **kwargs) @register_model def regnety_160(pretrained=False, **kwargs) -> RegNet: """RegNetY-16GF""" return _create_regnet('regnety_160', pretrained, **kwargs) @register_model def regnety_320(pretrained=False, **kwargs) -> RegNet: """RegNetY-32GF""" return _create_regnet('regnety_320', pretrained, **kwargs) @register_model def regnety_640(pretrained=False, **kwargs) -> RegNet: """RegNetY-64GF""" return _create_regnet('regnety_640', pretrained, **kwargs) @register_model def regnety_1280(pretrained=False, **kwargs) -> RegNet: """RegNetY-128GF""" return _create_regnet('regnety_1280', pretrained, **kwargs) @register_model def regnety_2560(pretrained=False, **kwargs) -> RegNet: """RegNetY-256GF""" return _create_regnet('regnety_2560', pretrained, **kwargs) @register_model def regnety_040_sgn(pretrained=False, **kwargs) -> RegNet: """RegNetY-4.0GF w/ GroupNorm """ return _create_regnet('regnety_040_sgn', pretrained, **kwargs) 
@register_model def regnetv_040(pretrained=False, **kwargs) -> RegNet: """RegNetV-4.0GF (pre-activation)""" return _create_regnet('regnetv_040', pretrained, **kwargs) @register_model def regnetv_064(pretrained=False, **kwargs) -> RegNet: """RegNetV-6.4GF (pre-activation)""" return _create_regnet('regnetv_064', pretrained, **kwargs) @register_model def regnetz_005(pretrained=False, **kwargs) -> RegNet: """RegNetZ-500MF NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py but it is not clear whether it is equivalent to the paper model, as the paper does not detail it. """ return _create_regnet('regnetz_005', pretrained, zero_init_last=False, **kwargs) @register_model def regnetz_040(pretrained=False, **kwargs) -> RegNet: """RegNetZ-4.0GF NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py but it is not clear whether it is equivalent to the paper model, as the paper does not detail it. """ return _create_regnet('regnetz_040', pretrained, zero_init_last=False, **kwargs) @register_model def regnetz_040_h(pretrained=False, **kwargs) -> RegNet: """RegNetZ-4.0GF (h variant of regnetz_040) NOTE: config found in https://github.com/facebookresearch/ClassyVision/blob/main/classy_vision/models/regnet.py but it is not clear whether it is equivalent to the paper model, as the paper does not detail it. """ return _create_regnet('regnetz_040_h', pretrained, zero_init_last=False, **kwargs) register_model_deprecations(__name__, { 'regnetz_040h': 'regnetz_040_h', })
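# --- Usage sketch (illustrative addition, not part of the original file) ---
# The @register_model entrypoints above expose every RegNet variant through
# timm.create_model; features_only=True (a standard timm option) returns the
# intermediate feature maps described by each model's feature_info instead of
# classification logits. A minimal example, assuming timm is installed:
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('regnety_032', pretrained=False, num_classes=10).eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(x)
    print(logits.shape)  # torch.Size([1, 10])

    backbone = timm.create_model('regnety_032', pretrained=False, features_only=True)
    with torch.no_grad():
        feats = backbone(x)
    print([f.shape for f in feats])  # one tensor per feature stage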
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/repvit.py
""" RepViT Paper: `RepViT: Revisiting Mobile CNN From ViT Perspective` - https://arxiv.org/abs/2307.09283 @misc{wang2023repvit, title={RepViT: Revisiting Mobile CNN From ViT Perspective}, author={Ao Wang and Hui Chen and Zijia Lin and Hengjun Pu and Guiguang Ding}, year={2023}, eprint={2307.09283}, archivePrefix={arXiv}, primaryClass={cs.CV} } Adapted from official impl at https://github.com/jameslahm/RepViT """ __all__ = ['RepVit'] import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._registry import register_model, generate_default_cfgs from ._builder import build_model_with_cfg from timm.layers import SqueezeExcite, trunc_normal_, to_ntuple, to_2tuple from ._manipulate import checkpoint_seq import torch class ConvNorm(nn.Sequential): def __init__(self, in_dim, out_dim, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.add_module('c', nn.Conv2d(in_dim, out_dim, ks, stride, pad, dilation, groups, bias=False)) self.add_module('bn', nn.BatchNorm2d(out_dim)) nn.init.constant_(self.bn.weight, bn_weight_init) nn.init.constant_(self.bn.bias, 0) @torch.no_grad() def fuse(self): c, bn = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = nn.Conv2d( w.size(1) * self.c.groups, w.size(0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups, device=c.weight.device, ) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class NormLinear(nn.Sequential): def __init__(self, in_dim, out_dim, bias=True, std=0.02): super().__init__() self.add_module('bn', nn.BatchNorm1d(in_dim)) self.add_module('l', nn.Linear(in_dim, out_dim, bias=bias)) trunc_normal_(self.l.weight, std=std) if bias: nn.init.constant_(self.l.bias, 0) @torch.no_grad() def fuse(self): bn, l = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 w = l.weight * w[None, :] if l.bias is None: b = b @ self.l.weight.T else: b = (l.weight @ b[:, None]).view(-1) + self.l.bias m = nn.Linear(w.size(1), w.size(0), device=l.weight.device) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class RepVggDw(nn.Module): def __init__(self, ed, kernel_size): super().__init__() self.conv = ConvNorm(ed, ed, kernel_size, 1, (kernel_size - 1) // 2, groups=ed) self.conv1 = ConvNorm(ed, ed, 1, 1, 0, groups=ed) self.dim = ed def forward(self, x): return self.conv(x) + self.conv1(x) + x @torch.no_grad() def fuse(self): conv = self.conv.fuse() conv1 = self.conv1.fuse() conv_w = conv.weight conv_b = conv.bias conv1_w = conv1.weight conv1_b = conv1.bias conv1_w = nn.functional.pad(conv1_w, [1, 1, 1, 1]) identity = nn.functional.pad( torch.ones(conv1_w.shape[0], conv1_w.shape[1], 1, 1, device=conv1_w.device), [1, 1, 1, 1] ) final_conv_w = conv_w + conv1_w + identity final_conv_b = conv_b + conv1_b conv.weight.data.copy_(final_conv_w) conv.bias.data.copy_(final_conv_b) return conv class RepVitMlp(nn.Module): def __init__(self, in_dim, hidden_dim, act_layer): super().__init__() self.conv1 = ConvNorm(in_dim, hidden_dim, 1, 1, 0) self.act = act_layer() self.conv2 = ConvNorm(hidden_dim, in_dim, 1, 1, 0, bn_weight_init=0) def forward(self, x): return self.conv2(self.act(self.conv1(x))) class RepViTBlock(nn.Module): def __init__(self, in_dim, mlp_ratio, kernel_size, use_se, act_layer): super(RepViTBlock, 
self).__init__() self.token_mixer = RepVggDw(in_dim, kernel_size) self.se = SqueezeExcite(in_dim, 0.25) if use_se else nn.Identity() self.channel_mixer = RepVitMlp(in_dim, in_dim * mlp_ratio, act_layer) def forward(self, x): x = self.token_mixer(x) x = self.se(x) identity = x x = self.channel_mixer(x) return identity + x class RepVitStem(nn.Module): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1) self.act1 = act_layer() self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1) self.stride = 4 def forward(self, x): return self.conv2(self.act1(self.conv1(x))) class RepVitDownsample(nn.Module): def __init__(self, in_dim, mlp_ratio, out_dim, kernel_size, act_layer): super().__init__() self.pre_block = RepViTBlock(in_dim, mlp_ratio, kernel_size, use_se=False, act_layer=act_layer) self.spatial_downsample = ConvNorm(in_dim, in_dim, kernel_size, 2, (kernel_size - 1) // 2, groups=in_dim) self.channel_downsample = ConvNorm(in_dim, out_dim, 1, 1) self.ffn = RepVitMlp(out_dim, out_dim * mlp_ratio, act_layer) def forward(self, x): x = self.pre_block(x) x = self.spatial_downsample(x) x = self.channel_downsample(x) identity = x x = self.ffn(x) return x + identity class RepVitClassifier(nn.Module): def __init__(self, dim, num_classes, distillation=False, drop=0.): super().__init__() self.head_drop = nn.Dropout(drop) self.head = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity() self.distillation = distillation self.distilled_training = False self.num_classes = num_classes if distillation: self.head_dist = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x): x = self.head_drop(x) if self.distillation: x1, x2 = self.head(x), self.head_dist(x) if self.training and self.distilled_training and not torch.jit.is_scripting(): return x1, x2 else: return (x1 + x2) / 2 else: x = self.head(x) return x @torch.no_grad() def fuse(self): if not self.num_classes > 0: return nn.Identity() head = self.head.fuse() if self.distillation: head_dist = self.head_dist.fuse() head.weight += head_dist.weight head.bias += head_dist.bias head.weight /= 2 head.bias /= 2 return head else: return head class RepVitStage(nn.Module): def __init__(self, in_dim, out_dim, depth, mlp_ratio, act_layer, kernel_size=3, downsample=True): super().__init__() if downsample: self.downsample = RepVitDownsample(in_dim, mlp_ratio, out_dim, kernel_size, act_layer) else: assert in_dim == out_dim self.downsample = nn.Identity() blocks = [] use_se = True for _ in range(depth): blocks.append(RepViTBlock(out_dim, mlp_ratio, kernel_size, use_se, act_layer)) use_se = not use_se self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class RepVit(nn.Module): def __init__( self, in_chans=3, img_size=224, embed_dim=(48,), depth=(2,), mlp_ratio=2, global_pool='avg', kernel_size=3, num_classes=1000, act_layer=nn.GELU, distillation=True, drop_rate=0., ): super(RepVit, self).__init__() self.grad_checkpointing = False self.global_pool = global_pool self.embed_dim = embed_dim self.num_classes = num_classes in_dim = embed_dim[0] self.stem = RepVitStem(in_chans, in_dim, act_layer) stride = self.stem.stride resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))]) num_stages = len(embed_dim) mlp_ratios = to_ntuple(num_stages)(mlp_ratio) self.feature_info = [] stages = [] for i in range(num_stages): downsample = True if i != 0 else False stages.append( RepVitStage( in_dim, 
embed_dim[i], depth[i], mlp_ratio=mlp_ratios[i], act_layer=act_layer, kernel_size=kernel_size, downsample=downsample, ) ) stage_stride = 2 if downsample else 1 stride *= stage_stride resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution]) self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')] in_dim = embed_dim[i] self.stages = nn.Sequential(*stages) self.num_features = embed_dim[-1] self.head_drop = nn.Dropout(drop_rate) self.head = RepVitClassifier(embed_dim[-1], num_classes, distillation) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None, distillation=False): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = ( RepVitClassifier(self.embed_dim[-1], num_classes, distillation) if num_classes > 0 else nn.Identity() ) @torch.jit.ignore def set_distilled_training(self, enable=True): self.head.distilled_training = enable def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean((2, 3), keepdim=False) x = self.head_drop(x) return self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x @torch.no_grad() def fuse(self): def fuse_children(net): for child_name, child in net.named_children(): if hasattr(child, 'fuse'): fused = child.fuse() setattr(net, child_name, fused) fuse_children(fused) else: fuse_children(child) fuse_children(self) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.c', 'classifier': ('head.head.l', 'head.head_dist.l'), **kwargs, } default_cfgs = generate_default_cfgs( { 'repvit_m1.dist_in1k': _cfg( url='https://github.com/THU-MIG/RepViT/releases/download/v1.0/repvit_m1_distill_300_timm.pth' ), 'repvit_m2.dist_in1k': _cfg( url='https://github.com/THU-MIG/RepViT/releases/download/v1.0/repvit_m2_distill_300_timm.pth' ), 'repvit_m3.dist_in1k': _cfg( url='https://github.com/THU-MIG/RepViT/releases/download/v1.0/repvit_m3_distill_300_timm.pth' ), } ) def _create_repvit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( RepVit, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs, ) return model @register_model def repvit_m1(pretrained=False, **kwargs): """ Constructs a RepViT-M1 model """ model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2)) return _create_repvit('repvit_m1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m2(pretrained=False, **kwargs): """ Constructs a RepViT-M2 model """ model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2)) return _create_repvit('repvit_m2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m3(pretrained=False, **kwargs): """ Constructs a RepViT-M3 model """ 
model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 18, 2)) return _create_repvit('repvit_m3', pretrained=pretrained, **dict(model_args, **kwargs))
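# --- Reparameterization sketch (illustrative addition, not from the original file) ---
# ConvNorm.fuse() above folds BatchNorm into the preceding conv with the
# standard identities w' = w * gamma / sqrt(var + eps) and
# b' = beta - mean * gamma / sqrt(var + eps); RepVggDw.fuse() then merges the
# 3x3 depthwise branch, the 1x1 branch (zero-padded to 3x3), and the identity
# into a single depthwise conv. In eval mode the fused model should match the
# unfused one up to numerical tolerance. A minimal check, assuming this
# module is importable:
if __name__ == '__main__':
    import copy
    model = repvit_m1(pretrained=False).eval()
    fused = copy.deepcopy(model)
    fused.fuse()  # in-place: replaces every child module exposing a fuse() method
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        y_ref, y_fused = model(x), fused(x)
    print(torch.allclose(y_ref, y_fused, atol=1e-4))  # expected: True (up to numerical tolerance)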
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/res2net.py
""" Res2Net and Res2NeXt Adapted from Official Pytorch impl at: https://github.com/gasvn/Res2Net/ Paper: `Res2Net: A New Multi-scale Backbone Architecture` - https://arxiv.org/abs/1904.01169 """ import math import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet __all__ = [] class Bottle2neck(nn.Module): """ Res2Net/Res2NeXT Bottleneck Adapted from https://github.com/gasvn/Res2Net/blob/master/res2net.py """ expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=26, scale=4, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=None, attn_layer=None, **_, ): super(Bottle2neck, self).__init__() self.scale = scale self.is_first = stride > 1 or downsample is not None self.num_scales = max(1, scale - 1) width = int(math.floor(planes * (base_width / 64.0))) * cardinality self.width = width outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = nn.Conv2d(inplanes, width * scale, kernel_size=1, bias=False) self.bn1 = norm_layer(width * scale) convs = [] bns = [] for i in range(self.num_scales): convs.append(nn.Conv2d( width, width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)) bns.append(norm_layer(width)) self.convs = nn.ModuleList(convs) self.bns = nn.ModuleList(bns) if self.is_first: # FIXME this should probably have count_include_pad=False, but hurts original weights self.pool = nn.AvgPool2d(kernel_size=3, stride=stride, padding=1) else: self.pool = None self.conv3 = nn.Conv2d(width * scale, outplanes, kernel_size=1, bias=False) self.bn3 = norm_layer(outplanes) self.se = attn_layer(outplanes) if attn_layer is not None else None self.relu = act_layer(inplace=True) self.downsample = downsample def zero_init_last(self): if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) spx = torch.split(out, self.width, 1) spo = [] sp = spx[0] # redundant, for torchscript for i, (conv, bn) in enumerate(zip(self.convs, self.bns)): if i == 0 or self.is_first: sp = spx[i] else: sp = sp + spx[i] sp = conv(sp) sp = bn(sp) sp = self.relu(sp) spo.append(sp) if self.scale > 1: if self.pool is not None: # self.is_first == True, None check for torchscript spo.append(self.pool(spx[-1])) else: spo.append(spx[-1]) out = torch.cat(spo, 1) out = self.conv3(out) out = self.bn3(out) if self.se is not None: out = self.se(out) if self.downsample is not None: shortcut = self.downsample(x) out += shortcut out = self.relu(out) return out def _create_res2net(variant, pretrained=False, **kwargs): return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'res2net50_26w_4s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_48w_2s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_14w_8s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_26w_6s.in1k': _cfg(hf_hub_id='timm/'), 'res2net50_26w_8s.in1k': _cfg(hf_hub_id='timm/'), 'res2net101_26w_4s.in1k': _cfg(hf_hub_id='timm/'), 
'res2next50.in1k': _cfg(hf_hub_id='timm/'), 'res2net50d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'), 'res2net101d.in1k': _cfg(hf_hub_id='timm/', first_conv='conv1.0'), }) @register_model def res2net50_26w_4s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 26w4s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=4)) return _create_res2net('res2net50_26w_4s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net101_26w_4s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-101 26w4s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, block_args=dict(scale=4)) return _create_res2net('res2net101_26w_4s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_26w_6s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 26w6s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=6)) return _create_res2net('res2net50_26w_6s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_26w_8s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 26w8s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, block_args=dict(scale=8)) return _create_res2net('res2net50_26w_8s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_48w_2s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 48w2s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=48, block_args=dict(scale=2)) return _create_res2net('res2net50_48w_2s', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50_14w_8s(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50 14w8s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=14, block_args=dict(scale=8)) return _create_res2net('res2net50_14w_8s', pretrained, **dict(model_args, **kwargs)) @register_model def res2next50(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2NeXt-50 4s model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=4, cardinality=8, block_args=dict(scale=4)) return _create_res2net('res2next50', pretrained, **dict(model_args, **kwargs)) @register_model def res2net50d(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-50-D model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 6, 3], base_width=26, stem_type='deep', avg_down=True, stem_width=32, block_args=dict(scale=4)) return _create_res2net('res2net50d', pretrained, **dict(model_args, **kwargs)) @register_model def res2net101d(pretrained=False, **kwargs) -> ResNet: """Constructs a Res2Net-101-D model. """ model_args = dict( block=Bottle2neck, layers=[3, 4, 23, 3], base_width=26, stem_type='deep', avg_down=True, stem_width=32, block_args=dict(scale=4)) return _create_res2net('res2net101d', pretrained, **dict(model_args, **kwargs))
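# --- Shape sketch (illustrative addition, not part of the original file) ---
# Bottle2neck above expands the input to width * scale channels with a 1x1
# conv, splits the result into `scale` groups of `width` channels, runs
# scale - 1 of them through 3x3 convs with hierarchical residual connections,
# and re-concatenates before the final 1x1 conv. A quick check on a single
# block (note norm_layer must be supplied when using the block standalone):
if __name__ == '__main__':
    block = Bottle2neck(256, 64, base_width=26, scale=4, norm_layer=nn.BatchNorm2d)
    # width = floor(64 * 26 / 64) * 1 = 26, so conv1 outputs 26 * 4 = 104 channels
    x = torch.randn(2, 256, 56, 56)
    print(block(x).shape)  # torch.Size([2, 256, 56, 56]) since expansion == 4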
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/resnest.py
""" ResNeSt Models Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang Modified for torchscript compat, and consistency with timm by Ross Wightman """ from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SplitAttn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet class ResNestBottleneck(nn.Module): """ResNet Bottleneck """ # pylint: disable=unused-argument expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(ResNestBottleneck, self).__init__() assert reduce_first == 1 # not supported assert attn_layer is None # not supported assert aa_layer is None # TODO not yet supported assert drop_path is None # TODO not yet supported group_width = int(planes * (base_width / 64.)) * cardinality first_dilation = first_dilation or dilation if avd and (stride > 1 or is_first): avd_stride = stride stride = 1 else: avd_stride = 0 self.radix = radix self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) self.bn1 = norm_layer(group_width) self.act1 = act_layer(inplace=True) self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None if self.radix >= 1: self.conv2 = SplitAttn( group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_layer=drop_block) self.bn2 = nn.Identity() self.drop_block = nn.Identity() self.act2 = nn.Identity() else: self.conv2 = nn.Conv2d( group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) self.bn2 = norm_layer(group_width) self.drop_block = drop_block() if drop_block is not None else nn.Identity() self.act2 = act_layer(inplace=True) self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) self.bn3 = norm_layer(planes*4) self.act3 = act_layer(inplace=True) self.downsample = downsample def zero_init_last(self): if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.act1(out) if self.avd_first is not None: out = self.avd_first(out) out = self.conv2(out) out = self.bn2(out) out = self.drop_block(out) out = self.act2(out) if self.avd_last is not None: out = self.avd_last(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: shortcut = self.downsample(x) out += shortcut out = self.act3(out) return out def _create_resnest(variant, pretrained=False, **kwargs): return build_model_with_cfg( ResNet, variant, pretrained, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1.0', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 
'resnest14d.gluon_in1k': _cfg(hf_hub_id='timm/'), 'resnest26d.gluon_in1k': _cfg(hf_hub_id='timm/'), 'resnest50d.in1k': _cfg(hf_hub_id='timm/'), 'resnest101e.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), 'resnest200e.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), 'resnest269e.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), 'resnest50d_4s2x40d.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'resnest50d_1s4x24d.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic') }) @register_model def resnest14d(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-14d model. Weights ported from GluonCV. """ model_kwargs = dict( block=ResNestBottleneck, layers=[1, 1, 1, 1], stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest14d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest26d(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-26d model. Weights ported from GluonCV. """ model_kwargs = dict( block=ResNestBottleneck, layers=[2, 2, 2, 2], stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest26d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest50d(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest50d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest101e(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 4, 23, 3], stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest101e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest200e(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 24, 36, 3], stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest200e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest269e(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. 
""" model_kwargs = dict( block=ResNestBottleneck, layers=[3, 30, 48, 8], stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest269e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest50d_4s2x40d(pretrained=False, **kwargs) -> ResNet: """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, block_args=dict(radix=4, avd=True, avd_first=True)) return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest50d_1s4x24d(pretrained=False, **kwargs) -> ResNet: """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, block_args=dict(radix=1, avd=True, avd_first=True)) return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/resnet.py
"""PyTorch ResNet This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with additional dropout and dynamic global avg/max pool. ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman Copyright 2019, Ross Wightman """ import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, GroupNorm, create_attn, get_attn, \ get_act_layer, get_norm_layer, create_classifier from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this def get_padding(kernel_size, stride, dilation=1): padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding def create_aa(aa_layer, channels, stride=2, enable=True): if not aa_layer or not enable: return nn.Identity() if issubclass(aa_layer, nn.AvgPool2d): return aa_layer(stride) else: return aa_layer(channels=channels, stride=stride) class BasicBlock(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(BasicBlock, self).__init__() assert cardinality == 1, 'BasicBlock only supports cardinality of 1' assert base_width == 64, 'BasicBlock does not support changing base width' first_planes = planes // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) self.conv1 = nn.Conv2d( inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, dilation=first_dilation, bias=False) self.bn1 = norm_layer(first_planes) self.drop_block = drop_block() if drop_block is not None else nn.Identity() self.act1 = act_layer(inplace=True) self.aa = create_aa(aa_layer, channels=first_planes, stride=stride, enable=use_aa) self.conv2 = nn.Conv2d( first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False) self.bn2 = norm_layer(outplanes) self.se = create_attn(attn_layer, outplanes) self.act2 = act_layer(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.drop_path = drop_path def zero_init_last(self): if getattr(self.bn2, 'weight', None) is not None: nn.init.zeros_(self.bn2.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.bn1(x) x = self.drop_block(x) x = self.act1(x) x = self.aa(x) x = self.conv2(x) x = self.bn2(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act2(x) return x class Bottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(Bottleneck, self).__init__() width = int(math.floor(planes * (base_width / 64)) * cardinality) first_planes = width // 
reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation) self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False) self.bn1 = norm_layer(first_planes) self.act1 = act_layer(inplace=True) self.conv2 = nn.Conv2d( first_planes, width, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) self.bn2 = norm_layer(width) self.drop_block = drop_block() if drop_block is not None else nn.Identity() self.act2 = act_layer(inplace=True) self.aa = create_aa(aa_layer, channels=width, stride=stride, enable=use_aa) self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False) self.bn3 = norm_layer(outplanes) self.se = create_attn(attn_layer, outplanes) self.act3 = act_layer(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.drop_path = drop_path def zero_init_last(self): if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.drop_block(x) x = self.act2(x) x = self.aa(x) x = self.conv3(x) x = self.bn3(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act3(x) return x def downsample_conv( in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None, ): norm_layer = norm_layer or nn.BatchNorm2d kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1 p = get_padding(kernel_size, stride, first_dilation) return nn.Sequential(*[ nn.Conv2d( in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False), norm_layer(out_channels) ]) def downsample_avg( in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None, ): norm_layer = norm_layer or nn.BatchNorm2d avg_stride = stride if dilation == 1 else 1 if stride == 1 and dilation == 1: pool = nn.Identity() else: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) return nn.Sequential(*[ pool, nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False), norm_layer(out_channels) ]) def drop_blocks(drop_prob=0.): return [ None, None, partial(DropBlock2d, drop_prob=drop_prob, block_size=5, gamma_scale=0.25) if drop_prob else None, partial(DropBlock2d, drop_prob=drop_prob, block_size=3, gamma_scale=1.00) if drop_prob else None] def make_blocks( block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32, down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs, ): stages = [] feature_info = [] net_num_blocks = sum(block_repeats) net_block_idx = 0 net_stride = 4 dilation = prev_dilation = 1 for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))): stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it stride = 1 if stage_idx == 0 else 2 if net_stride >= output_stride: dilation *= stride stride = 1 else: net_stride *= stride downsample = None if stride != 1 or inplanes != planes * 
block_fn.expansion: down_kwargs = dict( in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size, stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer'), ) downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs) block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs) blocks = [] for block_idx in range(num_blocks): downsample = downsample if block_idx == 0 else None stride = stride if block_idx == 0 else 1 block_dpr = drop_path_rate * net_block_idx / (net_num_blocks - 1) # stochastic depth linear decay rule blocks.append(block_fn( inplanes, planes, stride, downsample, first_dilation=prev_dilation, drop_path=DropPath(block_dpr) if block_dpr > 0. else None, **block_kwargs, )) prev_dilation = dilation inplanes = planes * block_fn.expansion net_block_idx += 1 stages.append((stage_name, nn.Sequential(*blocks))) feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name)) return stages, feature_info class ResNet(nn.Module): """ResNet / ResNeXt / SE-ResNeXt / SE-Net This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that * have > 1 stride in the 3x3 conv layer of bottleneck * have conv-bn-act ordering This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the 'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default. ResNet variants (the same modifications can be used in SE/ResNeXt models as well): * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64) * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128) * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample ResNeXt * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths * same c,d, e, s variants as ResNet can be enabled SE-ResNeXt * normal - 7x7 stem, stem_width = 64 * same c, d, e, s variants as ResNet can be enabled SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64, reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block """ def __init__( self, block, layers, num_classes=1000, in_chans=3, output_stride=32, global_pool='avg', cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False, block_reduce_first=1, down_kernel_size=1, avg_down=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0., drop_block_rate=0., zero_init_last=True, block_args=None, ): """ Args: block (nn.Module): class for the residual block. Options are BasicBlock, Bottleneck. layers (List[int]) : number of layers in each block num_classes (int): number of classification classes (default 1000) in_chans (int): number of input (color) channels. (default 3) output_stride (int): output stride of the network, 32, 16, or 8. (default 32) global_pool (str): Global pooling type. 
One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg') cardinality (int): number of convolution groups for 3x3 conv in Bottleneck. (default 1) base_width (int): bottleneck channels factor. `planes * base_width / 64 * cardinality` (default 64) stem_width (int): number of channels in stem convolutions (default 64) stem_type (str): The type of stem (default ''): * '', default - a single 7x7 conv with a width of stem_width * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2 * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2 replace_stem_pool (bool): replace stem max-pooling layer with a 3x3 stride-2 convolution block_reduce_first (int): Reduction factor for first convolution output width of residual blocks, 1 for all archs except senets, where 2 (default 1) down_kernel_size (int): kernel size of residual block downsample path, 1x1 for most, 3x3 for senets (default: 1) avg_down (bool): use avg pooling for projection skip connection between stages/downsample (default False) act_layer (str, nn.Module): activation layer norm_layer (str, nn.Module): normalization layer aa_layer (nn.Module): anti-aliasing layer drop_rate (float): Dropout probability before classifier, for training (default 0.) drop_path_rate (float): Stochastic depth drop-path rate (default 0.) drop_block_rate (float): Drop block rate (default 0.) zero_init_last (bool): zero-init the last weight in residual path (usually last BN affine weight) block_args (dict): Extra kwargs to pass through to block module """ super(ResNet, self).__init__() block_args = block_args or dict() assert output_stride in (8, 16, 32) self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False act_layer = get_act_layer(act_layer) norm_layer = get_norm_layer(norm_layer) # Stem deep_stem = 'deep' in stem_type inplanes = stem_width * 2 if deep_stem else 64 if deep_stem: stem_chs = (stem_width, stem_width) if 'tiered' in stem_type: stem_chs = (3 * (stem_width // 4), stem_width) self.conv1 = nn.Sequential(*[ nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False), norm_layer(stem_chs[0]), act_layer(inplace=True), nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False), norm_layer(stem_chs[1]), act_layer(inplace=True), nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)]) else: self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.bn1 = norm_layer(inplanes) self.act1 = act_layer(inplace=True) self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')] # Stem pooling. The name 'maxpool' remains for weight compatibility. 
if replace_stem_pool: self.maxpool = nn.Sequential(*filter(None, [ nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False), create_aa(aa_layer, channels=inplanes, stride=2) if aa_layer is not None else None, norm_layer(inplanes), act_layer(inplace=True), ])) else: if aa_layer is not None: if issubclass(aa_layer, nn.AvgPool2d): self.maxpool = aa_layer(2) else: self.maxpool = nn.Sequential(*[ nn.MaxPool2d(kernel_size=3, stride=1, padding=1), aa_layer(channels=inplanes, stride=2)]) else: self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) # Feature Blocks channels = [64, 128, 256, 512] stage_modules, stage_feature_info = make_blocks( block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width, output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down, down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args, ) for stage in stage_modules: self.add_module(*stage) # layer1, layer2, etc self.feature_info.extend(stage_feature_info) # Head (Pooling and Classifier) self.num_features = 512 * block.expansion self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.init_weights(zero_init_last=zero_init_last) @torch.jit.ignore def init_weights(self, zero_init_last=True): for n, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if zero_init_last: for m in self.modules(): if hasattr(m, 'zero_init_last'): m.zero_init_last() @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem=r'^conv1|bn1|maxpool', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self, name_only=False): return 'fc' if name_only else self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.maxpool(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq([self.layer1, self.layer2, self.layer3, self.layer4], x, flatten=True) else: x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate: x = F.dropout(x, p=float(self.drop_rate), training=self.training) return x if pre_logits else self.fc(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_resnet(variant, pretrained=False, **kwargs): return build_model_with_cfg(ResNet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs } def _tcfg(url='', **kwargs): return _cfg(url=url, **dict({'interpolation': 'bicubic'}, **kwargs)) def _ttcfg(url='', **kwargs): return _cfg(url=url, **dict({ 'interpolation': 'bicubic', 'test_input_size': (3, 288, 288), 'test_crop_pct': 0.95, 'origin_url': 
'https://github.com/huggingface/pytorch-image-models', }, **kwargs)) def _rcfg(url='', **kwargs): return _cfg(url=url, **dict({ 'interpolation': 'bicubic', 'crop_pct': 0.95, 'test_input_size': (3, 288, 288), 'test_crop_pct': 1.0, 'origin_url': 'https://github.com/huggingface/pytorch-image-models', 'paper_ids': 'arXiv:2110.00476' }, **kwargs)) def _r3cfg(url='', **kwargs): return _cfg(url=url, **dict({ 'interpolation': 'bicubic', 'input_size': (3, 160, 160), 'pool_size': (5, 5), 'crop_pct': 0.95, 'test_input_size': (3, 224, 224), 'test_crop_pct': 0.95, 'origin_url': 'https://github.com/huggingface/pytorch-image-models', 'paper_ids': 'arXiv:2110.00476', }, **kwargs)) def _gcfg(url='', **kwargs): return _cfg(url=url, **dict({ 'interpolation': 'bicubic', 'origin_url': 'https://cv.gluon.ai/model_zoo/classification.html', }, **kwargs)) default_cfgs = generate_default_cfgs({ # ResNet and Wide ResNet trained w/ timm (RSB paper and others) 'resnet10t.c3_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet10t_176_c3-f3215ab1.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224), first_conv='conv1.0'), 'resnet14t.c3_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet14t_176_c3-c4ed2c37.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_crop_pct=0.95, test_input_size=(3, 224, 224), first_conv='conv1.0'), 'resnet18.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a1_0-d63eafa0.pth'), 'resnet18.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a2_0-b61bd467.pth'), 'resnet18.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet18_a3_0-40c531c8.pth'), 'resnet18d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth', first_conv='conv1.0'), 'resnet34.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a1_0-46f8f793.pth'), 'resnet34.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a2_0-82d47d71.pth'), 'resnet34.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet34_a3_0-a20cabb6.pth', crop_pct=0.95), 'resnet34.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'), 'resnet34d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth', first_conv='conv1.0'), 'resnet26.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth'), 'resnet26d.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth', first_conv='conv1.0'), 'resnet26t.ra2_in1k': _ttcfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94, test_input_size=(3, 320, 320), test_crop_pct=1.0), 'resnet50.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth'), 'resnet50.a1h_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1h2_176-001a1197.pth', input_size=(3, 176, 176), pool_size=(6, 6), crop_pct=0.9, test_input_size=(3, 224, 224), test_crop_pct=1.0), 'resnet50.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a2_0-a2746f79.pth'), 'resnet50.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a3_0-59cae1ef.pth'), 'resnet50.b1k_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b1k-532a802a.pth'), 'resnet50.b2k_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_b2k-1ba180c1.pth'), 'resnet50.c1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c1-5ba5e060.pth'), 'resnet50.c2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_c2-d01e05b2.pth'), 'resnet50.d_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_d-f39db8af.pth'), 'resnet50.ram_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth'), 'resnet50.am_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_am-6c502b37.pth'), 'resnet50.ra_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnet50_ra-85ebb6e5.pth'), 'resnet50.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/rw_resnet50-86acaeed.pth'), 'resnet50d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth', first_conv='conv1.0'), 'resnet50d.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a1_0-e20cff14.pth', first_conv='conv1.0'), 'resnet50d.a2_in1k': _rcfg( url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a2_0-a3adc64d.pth', first_conv='conv1.0'), 'resnet50d.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50d_a3_0-403fdfad.pth', first_conv='conv1.0'), 'resnet50t.untrained': _ttcfg(first_conv='conv1.0'), 'resnet101.a1h_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth'), 'resnet101.a1_in1k': _rcfg( hf_hub_id='timm/', 
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1_0-cdcb52a9.pth'), 'resnet101.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a2_0-6edb36c7.pth'), 'resnet101.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a3_0-1db14157.pth'), 'resnet101d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)), 'resnet152.a1h_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth'), 'resnet152.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1_0-2eee8a7a.pth'), 'resnet152.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a2_0-b4c6978f.pth'), 'resnet152.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a3_0-134d4688.pth'), 'resnet152d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)), 'resnet200.untrained': _ttcfg(), 'resnet200d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320)), 'wide_resnet50_2.racm_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth'), # torchvision resnet weights 'resnet18.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet18-5c106cde.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet34.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet34-333f7ec4.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet50.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet50-19c8e357.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet50.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet50-11ad3fa6.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet101.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet101.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet101-cd907fc2.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', 
origin_url='https://github.com/pytorch/vision'), 'resnet152.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet152-b121ed2d.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnet152.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnet152-f82ba261.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet50_2.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet50_2.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet50_2-9ba9bcbe.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet101_2.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'wide_resnet101_2.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/wide_resnet101_2-d733dc28.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), # ResNets w/ alternative norm layers 'resnet50_gn.a1h_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_gn_a1h2-8fe6c4d0.pth', crop_pct=0.94), # ResNeXt trained in timm (RSB paper and others) 'resnext50_32x4d.a1h_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1h-0146ab0a.pth'), 'resnext50_32x4d.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a1_0-b5a91a1d.pth'), 'resnext50_32x4d.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a2_0-efc76add.pth'), 'resnext50_32x4d.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/resnext50_32x4d_a3_0-3e450271.pth'), 'resnext50_32x4d.ra_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth'), 'resnext50d_32x4d.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth', first_conv='conv1.0'), 'resnext101_32x4d.untrained': _ttcfg(), 'resnext101_64x4d.c1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnext101_64x4d_c-0d0e0cc0.pth'), # torchvision ResNeXt weights 'resnext50_32x4d.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext101_32x8d.tv_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext101_64x4d.tv_in1k': 
_cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext101_64x4d-173b62eb.pth', license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext50_32x4d.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext50_32x4d-1a0047aa.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), 'resnext101_32x8d.tv2_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/resnext101_32x8d-110c445d.pth', input_size=(3, 176, 176), pool_size=(6, 6), test_input_size=(3, 224, 224), test_crop_pct=0.965, license='bsd-3-clause', origin_url='https://github.com/pytorch/vision'), # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags # from https://github.com/facebookresearch/WSL-Images # Please note the CC-BY-NC 4.0 license on these weights, non-commercial use only. 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), 'resnext101_32x16d.fb_wsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), 'resnext101_32x32d.fb_wsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), 'resnext101_32x48d.fb_wsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/WSL-Images'), # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. 
'resnet18.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnet50.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only. 
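# The 'fb_swsl_ig1b_ft_in1k' tag below indicates semi-weakly supervised pretraining on the ~1B image Instagram (IG-1B) hashtag corpus followed by ImageNet-1k fine-tuning, mirroring the 'fb_ssl_yfcc100m_ft_in1k' YFCC100M models above.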
'resnet18.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnet50.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext50_32x4d.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x4d.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x8d.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), 'resnext101_32x16d.fb_swsl_ig1b_ft_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth', license='cc-by-nc-4.0', origin_url='https://github.com/facebookresearch/semi-supervised-ImageNet1K-models'), # Efficient Channel Attention ResNets 'ecaresnet26t.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), test_crop_pct=0.95, test_input_size=(3, 320, 320)), 'ecaresnetlight.miil_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnetlight-75a9c627.pth', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet50d.miil_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d-93c81e3b.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet50d_pruned.miil_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet50d_p-e4fa23c2.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet50t.ra2_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), test_crop_pct=0.95, test_input_size=(3, 320, 320)), 'ecaresnet50t.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a1_0-99bd76a8.pth', first_conv='conv1.0'), 'ecaresnet50t.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a2_0-b1c7b745.pth', first_conv='conv1.0'), 'ecaresnet50t.a3_in1k': _r3cfg( hf_hub_id='timm/', 
url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/ecaresnet50t_a3_0-8cc311f1.pth', first_conv='conv1.0'), 'ecaresnet101d.miil_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d-153dad65.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet101d_pruned.miil_in1k': _tcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/ecaresnet101d_p-9e74cb91.pth', first_conv='conv1.0', test_crop_pct=0.95, test_input_size=(3, 288, 288)), 'ecaresnet200d.untrained': _ttcfg( first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.95, pool_size=(8, 8)), 'ecaresnet269d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 352, 352)), # Efficient Channel Attention ResNeXts 'ecaresnext26t_32x4d.untrained': _tcfg(first_conv='conv1.0'), 'ecaresnext50t_32x4d.untrained': _tcfg(first_conv='conv1.0'), # Squeeze-Excitation ResNets, to eventually replace the models in senet.py 'seresnet18.untrained': _ttcfg(), 'seresnet34.untrained': _ttcfg(), 'seresnet50.a1_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a1_0-ffa00869.pth', crop_pct=0.95), 'seresnet50.a2_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a2_0-850de0d9.pth', crop_pct=0.95), 'seresnet50.a3_in1k': _r3cfg( hf_hub_id='timm/', url='https://github.com/huggingface/pytorch-image-models/releases/download/v0.1-rsb-weights/seresnet50_a3_0-317ecd56.pth', crop_pct=0.95), 'seresnet50.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth'), 'seresnet50t.untrained': _ttcfg( first_conv='conv1.0'), 'seresnet101.untrained': _ttcfg(), 'seresnet152.untrained': _ttcfg(), 'seresnet152d.ra2_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 320, 320) ), 'seresnet200d.untrained': _ttcfg( first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)), 'seresnet269d.untrained': _ttcfg( first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8)), # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py 'seresnext26d_32x4d.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth', first_conv='conv1.0'), 'seresnext26t_32x4d.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth', first_conv='conv1.0'), 'seresnext50_32x4d.racm_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth'), 'seresnext101_32x4d.untrained': _ttcfg(), 'seresnext101_32x8d.ah_in1k': _rcfg( hf_hub_id='timm/', 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101_32x8d_ah-e6bc4c0a.pth'), 'seresnext101d_32x8d.ah_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnext101d_32x8d_ah-191d7b94.pth', first_conv='conv1.0'), # ResNets with anti-aliasing / blur pool 'resnetaa50d.sw_in12k_ft_in1k': _ttcfg( hf_hub_id='timm/', first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'resnetaa101d.sw_in12k_ft_in1k': _ttcfg( hf_hub_id='timm/', first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'seresnextaa101d_32x8d.sw_in12k_ft_in1k_288': _ttcfg( hf_hub_id='timm/', crop_pct=0.95, input_size=(3, 288, 288), pool_size=(9, 9), test_input_size=(3, 320, 320), test_crop_pct=1.0, first_conv='conv1.0'), 'seresnextaa101d_32x8d.sw_in12k_ft_in1k': _ttcfg( hf_hub_id='timm/', first_conv='conv1.0', test_crop_pct=1.0), 'seresnextaa201d_32x8d.sw_in12k_ft_in1k_384': _cfg( hf_hub_id='timm/', interpolation='bicubic', first_conv='conv1.0', pool_size=(12, 12), input_size=(3, 384, 384), crop_pct=1.0), 'seresnextaa201d_32x8d.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, interpolation='bicubic', first_conv='conv1.0', crop_pct=0.95, input_size=(3, 320, 320), pool_size=(10, 10), test_input_size=(3, 384, 384), test_crop_pct=1.0), 'resnetaa50d.sw_in12k': _ttcfg( hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'resnetaa50d.d_in12k': _ttcfg( hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'resnetaa101d.sw_in12k': _ttcfg( hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'seresnextaa101d_32x8d.sw_in12k': _ttcfg( hf_hub_id='timm/', num_classes=11821, first_conv='conv1.0', crop_pct=0.95, test_crop_pct=1.0), 'resnetblur18.untrained': _ttcfg(), 'resnetblur50.bt_in1k': _ttcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth'), 'resnetblur50d.untrained': _ttcfg(first_conv='conv1.0'), 'resnetblur101d.untrained': _ttcfg(first_conv='conv1.0'), 'resnetaa50.a1h_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnetaa50_a1h-4cf422b3.pth'), 'seresnetaa50d.untrained': _ttcfg(first_conv='conv1.0'), 'seresnextaa101d_32x8d.ah_in1k': _rcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/seresnextaa101d_32x8d_ah-83c8ae12.pth', first_conv='conv1.0'), # ResNet-RS models 'resnetrs50.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth', input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs101.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth', input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs152.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), interpolation='bicubic', 
first_conv='conv1.0'), 'resnetrs200.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/resnetrs200_c-6b698b88.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs270.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs350.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth', input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384), interpolation='bicubic', first_conv='conv1.0'), 'resnetrs420.tf_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416), interpolation='bicubic', first_conv='conv1.0'), # gluon resnet weights 'resnet18.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet18_v1b-0757602b.pth'), 'resnet34.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet34_v1b-c6d82d59.pth'), 'resnet50.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1b-0ebe02e2.pth'), 'resnet101.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1b-3b017079.pth'), 'resnet152.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1b-c1edb0dd.pth'), 'resnet50c.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1c-48092f55.pth', first_conv='conv1.0'), 'resnet101c.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1c-1f26822a.pth', first_conv='conv1.0'), 'resnet152c.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1c-a3bb0b98.pth', first_conv='conv1.0'), 'resnet50d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1d-818a1b1b.pth', first_conv='conv1.0'), 'resnet101d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1d-0f9c8644.pth', first_conv='conv1.0'), 'resnet152d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1d-bd354e12.pth', first_conv='conv1.0'), 'resnet50s.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet50_v1s-1762acc0.pth', first_conv='conv1.0'), 'resnet101s.gluon_in1k': _gcfg( 
hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet101_v1s-60fe0cc1.pth', first_conv='conv1.0'), 'resnet152s.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnet152_v1s-dcc41b81.pth', first_conv='conv1.0'), 'resnext50_32x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth'), 'resnext101_32x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth'), 'resnext101_64x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth'), 'seresnext50_32x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext50_32x4d-90cf2d6e.pth'), 'seresnext101_32x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_32x4d-cf52900d.pth'), 'seresnext101_64x4d.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_seresnext101_64x4d-f9926f93.pth'), 'senet154.gluon_in1k': _gcfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_senet154-70a1a3c0.pth', first_conv='conv1.0'), }) @register_model def resnet10t(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-10-T model. """ model_args = dict(block=BasicBlock, layers=[1, 1, 1, 1], stem_width=32, stem_type='deep_tiered', avg_down=True) return _create_resnet('resnet10t', pretrained, **dict(model_args, **kwargs)) @register_model def resnet14t(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-14-T model. """ model_args = dict(block=Bottleneck, layers=[1, 1, 1, 1], stem_width=32, stem_type='deep_tiered', avg_down=True) return _create_resnet('resnet14t', pretrained, **dict(model_args, **kwargs)) @register_model def resnet18(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-18 model. """ model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2]) return _create_resnet('resnet18', pretrained, **dict(model_args, **kwargs)) @register_model def resnet18d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-18-D model. """ model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet18d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet34(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-34 model. """ model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3]) return _create_resnet('resnet34', pretrained, **dict(model_args, **kwargs)) @register_model def resnet34d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-34-D model. """ model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet34d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet26(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-26 model. 
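Uses Bottleneck blocks in a [2, 2, 2, 2] layout; counting the stem conv, the three convs in each of the 8 bottleneck blocks, and the classifier gives 1 + 3 * 8 + 1 = 26 layers.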
""" model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2]) return _create_resnet('resnet26', pretrained, **dict(model_args, **kwargs)) @register_model def resnet26t(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-26-T model. """ model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True) return _create_resnet('resnet26t', pretrained, **dict(model_args, **kwargs)) @register_model def resnet26d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-26-D model. """ model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet26d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50 model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) return _create_resnet('resnet50', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50c(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50-C model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep') return _create_resnet('resnet50c', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet50d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50s(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50-S model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=64, stem_type='deep') return _create_resnet('resnet50s', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50t(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50-T model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True) return _create_resnet('resnet50t', pretrained, **dict(model_args, **kwargs)) @register_model def resnet101(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-101 model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3]) return _create_resnet('resnet101', pretrained, **dict(model_args, **kwargs)) @register_model def resnet101c(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-101-C model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep') return _create_resnet('resnet101c', pretrained, **dict(model_args, **kwargs)) @register_model def resnet101d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-101-D model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet101d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet101s(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-101-S model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=64, stem_type='deep') return _create_resnet('resnet101s', pretrained, **dict(model_args, **kwargs)) @register_model def resnet152(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-152 model. """ model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3]) return _create_resnet('resnet152', pretrained, **dict(model_args, **kwargs)) @register_model def resnet152c(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-152-C model. 
""" model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep') return _create_resnet('resnet152c', pretrained, **dict(model_args, **kwargs)) @register_model def resnet152d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-152-D model. """ model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet152d', pretrained, **dict(model_args, **kwargs)) @register_model def resnet152s(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-152-S model. """ model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], stem_width=64, stem_type='deep') return _create_resnet('resnet152s', pretrained, **dict(model_args, **kwargs)) @register_model def resnet200(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-200 model. """ model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3]) return _create_resnet('resnet200', pretrained, **dict(model_args, **kwargs)) @register_model def resnet200d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-200-D model. """ model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnet200d', pretrained, **dict(model_args, **kwargs)) @register_model def wide_resnet50_2(pretrained=False, **kwargs) -> ResNet: """Constructs a Wide ResNet-50-2 model. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 channels, and in Wide ResNet-50-2 has 2048-1024-2048. """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128) return _create_resnet('wide_resnet50_2', pretrained, **dict(model_args, **kwargs)) @register_model def wide_resnet101_2(pretrained=False, **kwargs) -> ResNet: """Constructs a Wide ResNet-101-2 model. The model is the same as ResNet except for the bottleneck number of channels which is twice larger in every block. The number of channels in outer 1x1 convolutions is the same. """ model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128) return _create_resnet('wide_resnet101_2', pretrained, **dict(model_args, **kwargs)) @register_model def resnet50_gn(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50 model w/ GroupNorm """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs) return _create_resnet('resnet50_gn', pretrained, norm_layer=GroupNorm, **model_args) @register_model def resnext50_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNeXt50-32x4d model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4) return _create_resnet('resnext50_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext50d_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample """ model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnext50d_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNeXt-101 32x4d model. 
""" model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4) return _create_resnet('resnext101_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_32x8d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNeXt-101 32x8d model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8) return _create_resnet('resnext101_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_32x16d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNeXt-101 32x16d model """ model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16) return _create_resnet('resnext101_32x16d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_32x32d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNeXt-101 32x32d model """ model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32) return _create_resnet('resnext101_32x32d', pretrained, **dict(model_args, **kwargs)) @register_model def resnext101_64x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNeXt101-64x4d model. """ model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4) return _create_resnet('resnext101_64x4d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet26t(pretrained=False, **kwargs) -> ResNet: """Constructs an ECA-ResNeXt-26-T model. This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. """ model_args = dict( block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet26t', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet50d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model with eca. """ model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet50d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet50d_pruned(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model pruned with eca. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs)) @register_model def ecaresnet50t(pretrained=False, **kwargs) -> ResNet: """Constructs an ECA-ResNet-50-T model. Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn. """ model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet50t', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnetlight(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50-D light model with eca. """ model_args = dict( block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnetlight', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet101d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-101-D model with eca. 
""" model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet101d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet101d_pruned(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-101-D model pruned with eca. The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf """ model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **dict(model_args, **kwargs)) @register_model def ecaresnet200d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-200-D model with ECA. """ model_args = dict( block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet200d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnet269d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-269-D model with ECA. """ model_args = dict( block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnet269d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnext26t_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs an ECA-ResNeXt-26-T model. This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem. This model replaces SE module with the ECA module """ model_args = dict( block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnext26t_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def ecaresnext50t_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs an ECA-ResNeXt-50-T model. This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem. 
This model replaces SE module with the ECA module """ model_args = dict( block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca')) return _create_resnet('ecaresnext50t_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet18(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se')) return _create_resnet('seresnet18', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet34(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se')) return _create_resnet('seresnet34', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet50(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se')) return _create_resnet('seresnet50', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet50t(pretrained=False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnet50t', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet101(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se')) return _create_resnet('seresnet101', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet152(pretrained=False, **kwargs) -> ResNet: model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se')) return _create_resnet('seresnet152', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet152d(pretrained=False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnet152d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet200d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-200-D model with SE attn. """ model_args = dict( block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnet200d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnet269d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-269-D model with SE attn. """ model_args = dict( block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnet269d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext26d_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a SE-ResNeXt-26-D model.` This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for combination of deep stem and avg_pool in downsample. """ model_args = dict( block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnext26d_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext26t_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a SE-ResNet-26-T model. 
This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem. """ model_args = dict( block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32, stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnext26t_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext26tn_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a SE-ResNeXt-26-T model. NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note so keeping this def for backwards compat with any uses out there. Old 't' model is lost. """ return seresnext26t_32x4d(pretrained=pretrained, **kwargs) @register_model def seresnext50_32x4d(pretrained=False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, block_args=dict(attn_layer='se')) return _create_resnet('seresnext50_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext101_32x4d(pretrained=False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, block_args=dict(attn_layer='se')) return _create_resnet('seresnext101_32x4d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext101_32x8d(pretrained=False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, block_args=dict(attn_layer='se')) return _create_resnet('seresnext101_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext101d_32x8d(pretrained=False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnext101d_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnext101_64x4d(pretrained=False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, block_args=dict(attn_layer='se')) return _create_resnet('seresnext101_64x4d', pretrained, **dict(model_args, **kwargs)) @register_model def senet154(pretrained=False, **kwargs) -> ResNet: model_args = dict( block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep', down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se')) return _create_resnet('senet154', pretrained, **dict(model_args, **kwargs)) @register_model def resnetblur18(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-18 model with blur anti-aliasing """ model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d) return _create_resnet('resnetblur18', pretrained, **dict(model_args, **kwargs)) @register_model def resnetblur50(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50 model with blur anti-aliasing """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d) return _create_resnet('resnetblur50', pretrained, **dict(model_args, **kwargs)) @register_model def resnetblur50d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model with blur anti-aliasing """ model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetblur50d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetblur101d(pretrained=False, 
**kwargs) -> ResNet: """Constructs a ResNet-101-D model with blur anti-aliasing """ model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=BlurPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetblur101d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetaa34d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-34-D model w/ avgpool anti-aliasing """ model_args = dict( block=BasicBlock, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetaa34d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetaa50(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50 model with avgpool anti-aliasing """ model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d) return _create_resnet('resnetaa50', pretrained, **dict(model_args, **kwargs)) @register_model def resnetaa50d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-50-D model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetaa50d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetaa101d(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-101-D model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True) return _create_resnet('resnetaa101d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnetaa50d(pretrained=False, **kwargs) -> ResNet: """Constructs a SE-ResNet-50-D model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=nn.AvgPool2d, stem_width=32, stem_type='deep', avg_down=True, block_args=dict(attn_layer='se')) return _create_resnet('seresnetaa50d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnextaa101d_32x8d(pretrained=False, **kwargs) -> ResNet: """Constructs a SE-ResNeXt-101-D 32x8d model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, stem_width=32, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d, block_args=dict(attn_layer='se')) return _create_resnet('seresnextaa101d_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def seresnextaa201d_32x8d(pretrained=False, **kwargs): """Constructs a SE-ResNeXt-201-D 32x8d model with avgpool anti-aliasing """ model_args = dict( block=Bottleneck, layers=[3, 24, 36, 4], cardinality=32, base_width=8, stem_width=64, stem_type='deep', avg_down=True, aa_layer=nn.AvgPool2d, block_args=dict(attn_layer='se')) return _create_resnet('seresnextaa201d_32x8d', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs50(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-RS-50 model.
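Configured below with the RS tweaks: a deep 3x3 stem, a strided conv in place of the stem max-pool (replace_stem_pool=True), average-pool shortcut downsampling (avg_down=True), and SE attention with rd_ratio=0.25.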
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs50', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs101(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-RS-101 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs101', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs152(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-RS-152 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs152', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs200(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-RS-200 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs200', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs270(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-RS-270 model. Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs270', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs350(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-RS-350 model. 
Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs350', pretrained, **dict(model_args, **kwargs)) @register_model def resnetrs420(pretrained=False, **kwargs) -> ResNet: """Constructs a ResNet-RS-420 model Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579 Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs """ attn_layer = partial(get_attn('se'), rd_ratio=0.25) model_args = dict( block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True, avg_down=True, block_args=dict(attn_layer=attn_layer)) return _create_resnet('resnetrs420', pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, { 'tv_resnet34': 'resnet34.tv_in1k', 'tv_resnet50': 'resnet50.tv_in1k', 'tv_resnet101': 'resnet101.tv_in1k', 'tv_resnet152': 'resnet152.tv_in1k', 'tv_resnext50_32x4d' : 'resnext50_32x4d.tv_in1k', 'ig_resnext101_32x8d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', 'ig_resnext101_32x16d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', 'ig_resnext101_32x32d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', 'ig_resnext101_32x48d': 'resnext101_32x8d.fb_wsl_ig1b_ft_in1k', 'ssl_resnet18': 'resnet18.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnet50': 'resnet50.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnext50_32x4d': 'resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnext101_32x4d': 'resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnext101_32x8d': 'resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k', 'ssl_resnext101_32x16d': 'resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k', 'swsl_resnet18': 'resnet18.fb_swsl_ig1b_ft_in1k', 'swsl_resnet50': 'resnet50.fb_swsl_ig1b_ft_in1k', 'swsl_resnext50_32x4d': 'resnext50_32x4d.fb_swsl_ig1b_ft_in1k', 'swsl_resnext101_32x4d': 'resnext101_32x4d.fb_swsl_ig1b_ft_in1k', 'swsl_resnext101_32x8d': 'resnext101_32x8d.fb_swsl_ig1b_ft_in1k', 'swsl_resnext101_32x16d': 'resnext101_32x16d.fb_swsl_ig1b_ft_in1k', 'gluon_resnet18_v1b': 'resnet18.gluon_in1k', 'gluon_resnet34_v1b': 'resnet34.gluon_in1k', 'gluon_resnet50_v1b': 'resnet50.gluon_in1k', 'gluon_resnet101_v1b': 'resnet101.gluon_in1k', 'gluon_resnet152_v1b': 'resnet152.gluon_in1k', 'gluon_resnet50_v1c': 'resnet50c.gluon_in1k', 'gluon_resnet101_v1c': 'resnet101c.gluon_in1k', 'gluon_resnet152_v1c': 'resnet152c.gluon_in1k', 'gluon_resnet50_v1d': 'resnet50d.gluon_in1k', 'gluon_resnet101_v1d': 'resnet101d.gluon_in1k', 'gluon_resnet152_v1d': 'resnet152d.gluon_in1k', 'gluon_resnet50_v1s': 'resnet50s.gluon_in1k', 'gluon_resnet101_v1s': 'resnet101s.gluon_in1k', 'gluon_resnet152_v1s': 'resnet152s.gluon_in1k', 'gluon_resnext50_32x4d': 'resnext50_32x4d.gluon_in1k', 'gluon_resnext101_32x4d': 'resnext101_32x4d.gluon_in1k', 'gluon_resnext101_64x4d': 'resnext101_64x4d.gluon_in1k', 'gluon_seresnext50_32x4d': 'seresnext50_32x4d.gluon_in1k', 'gluon_seresnext101_32x4d': 'seresnext101_32x4d.gluon_in1k', 'gluon_seresnext101_64x4d': 'seresnext101_64x4d.gluon_in1k', 'gluon_senet154': 'senet154.gluon_in1k', })
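# ----------------------------------------------------------------------------
# Usage sketch (not part of the upstream module): the entrypoints registered
# above are normally built through timm's model factory rather than called
# directly. A minimal example, assuming timm is installed and the tagged
# weights are reachable on the hub:
#
#   import timm
#   model = timm.create_model('resnet50.a1_in1k', pretrained=True).eval()
#
# Deprecated aliases mapped above (e.g. 'tv_resnet50') should still resolve
# to their tag-qualified replacements, with a deprecation warning.
# ----------------------------------------------------------------------------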
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/resnetv2.py
"""Pre-Activation ResNet v2 with GroupNorm and Weight Standardization. A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfoer (BiT) source code at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have been included here as pretrained models from their original .NPZ checkpoints. Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transfomers (ViT) and extra padding support to allow porting of official Hybrid ResNet pretrained weights from https://github.com/google-research/vision_transformer Thanks to the Google team for the above two repositories and associated papers: * Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370 * An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929 * Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020. """ # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict # pylint: disable=g-importing-member from functools import partial import torch import torch.nn as nn from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import GroupNormAct, BatchNormAct2d, EvoNorm2dS0, FilterResponseNormTlu2d, ClassifierHead, \ DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d, get_act_layer, get_norm_act_layer, make_divisible from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq, named_apply, adapt_input_conv from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['ResNetV2'] # model_registry will add each entrypoint fn to this class PreActBottleneck(nn.Module): """Pre-activation (v2) bottleneck block. Follows the implementation of "Identity Mappings in Deep Residual Networks": https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua Except it puts the stride on 3x3 conv when available. 
""" def __init__( self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0., ): super().__init__() first_dilation = first_dilation or dilation conv_layer = conv_layer or StdConv2d norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) out_chs = out_chs or in_chs mid_chs = make_divisible(out_chs * bottle_ratio) if proj_layer is not None: self.downsample = proj_layer( in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, preact=True, conv_layer=conv_layer, norm_layer=norm_layer) else: self.downsample = None self.norm1 = norm_layer(in_chs) self.conv1 = conv_layer(in_chs, mid_chs, 1) self.norm2 = norm_layer(mid_chs) self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) self.norm3 = norm_layer(mid_chs) self.conv3 = conv_layer(mid_chs, out_chs, 1) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def zero_init_last(self): nn.init.zeros_(self.conv3.weight) def forward(self, x): x_preact = self.norm1(x) # shortcut branch shortcut = x if self.downsample is not None: shortcut = self.downsample(x_preact) # residual branch x = self.conv1(x_preact) x = self.conv2(self.norm2(x)) x = self.conv3(self.norm3(x)) x = self.drop_path(x) return x + shortcut class Bottleneck(nn.Module): """Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT. """ def __init__( self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0., ): super().__init__() first_dilation = first_dilation or dilation act_layer = act_layer or nn.ReLU conv_layer = conv_layer or StdConv2d norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) out_chs = out_chs or in_chs mid_chs = make_divisible(out_chs * bottle_ratio) if proj_layer is not None: self.downsample = proj_layer( in_chs, out_chs, stride=stride, dilation=dilation, preact=False, conv_layer=conv_layer, norm_layer=norm_layer) else: self.downsample = None self.conv1 = conv_layer(in_chs, mid_chs, 1) self.norm1 = norm_layer(mid_chs) self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) self.norm2 = norm_layer(mid_chs) self.conv3 = conv_layer(mid_chs, out_chs, 1) self.norm3 = norm_layer(out_chs, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.act3 = act_layer(inplace=True) def zero_init_last(self): if getattr(self.norm3, 'weight', None) is not None: nn.init.zeros_(self.norm3.weight) def forward(self, x): # shortcut branch shortcut = x if self.downsample is not None: shortcut = self.downsample(x) # residual x = self.conv1(x) x = self.norm1(x) x = self.conv2(x) x = self.norm2(x) x = self.conv3(x) x = self.norm3(x) x = self.drop_path(x) x = self.act3(x + shortcut) return x class DownsampleConv(nn.Module): def __init__( self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, conv_layer=None, norm_layer=None, ): super(DownsampleConv, self).__init__() self.conv = conv_layer(in_chs, out_chs, 1, stride=stride) self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) def forward(self, x): return self.norm(self.conv(x)) class DownsampleAvg(nn.Module): def __init__( self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, conv_layer=None, 
norm_layer=None, ): """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.""" super(DownsampleAvg, self).__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = conv_layer(in_chs, out_chs, 1, stride=1) self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) def forward(self, x): return self.norm(self.conv(self.pool(x))) class ResNetStage(nn.Module): """ResNet Stage.""" def __init__( self, in_chs, out_chs, stride, dilation, depth, bottle_ratio=0.25, groups=1, avg_down=False, block_dpr=None, block_fn=PreActBottleneck, act_layer=None, conv_layer=None, norm_layer=None, **block_kwargs, ): super(ResNetStage, self).__init__() first_dilation = 1 if dilation in (1, 2) else 2 layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer) proj_layer = DownsampleAvg if avg_down else DownsampleConv prev_chs = in_chs self.blocks = nn.Sequential() for block_idx in range(depth): drop_path_rate = block_dpr[block_idx] if block_dpr else 0. stride = stride if block_idx == 0 else 1 self.blocks.add_module(str(block_idx), block_fn( prev_chs, out_chs, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, first_dilation=first_dilation, proj_layer=proj_layer, drop_path_rate=drop_path_rate, **layer_kwargs, **block_kwargs, )) prev_chs = out_chs first_dilation = dilation proj_layer = None def forward(self, x): x = self.blocks(x) return x def is_stem_deep(stem_type): return any([s in stem_type for s in ('deep', 'tiered')]) def create_resnetv2_stem( in_chs, out_chs=64, stem_type='', preact=True, conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32), ): stem = OrderedDict() assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered') # NOTE conv padding mode can be changed by overriding the conv_layer def if is_stem_deep(stem_type): # A 3 deep 3x3 conv stack as in ResNet V1D models if 'tiered' in stem_type: stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py else: stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2) stem['norm1'] = norm_layer(stem_chs[0]) stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1) stem['norm2'] = norm_layer(stem_chs[1]) stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1) if not preact: stem['norm3'] = norm_layer(out_chs) else: # The usual 7x7 stem conv stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) if not preact: stem['norm'] = norm_layer(out_chs) if 'fixed' in stem_type: # 'fixed' SAME padding approximation that is used in BiT models stem['pad'] = nn.ConstantPad2d(1, 0.) stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) elif 'same' in stem_type: # full, input size based 'SAME' padding, used in ViT Hybrid model stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same') else: # the usual PyTorch symmetric padding stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) return nn.Sequential(stem) class ResNetV2(nn.Module): """Implementation of Pre-activation (v2) ResNet models.
""" def __init__( self, layers, channels=(256, 512, 1024, 2048), num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, width_factor=1, stem_chs=64, stem_type='', avg_down=False, preact=True, act_layer=nn.ReLU, norm_layer=partial(GroupNormAct, num_groups=32), conv_layer=StdConv2d, drop_rate=0., drop_path_rate=0., zero_init_last=False, ): """ Args: layers (List[int]) : number of layers in each block channels (List[int]) : number of channels in each block: num_classes (int): number of classification classes (default 1000) in_chans (int): number of input (color) channels. (default 3) global_pool (str): Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg') output_stride (int): output stride of the network, 32, 16, or 8. (default 32) width_factor (int): channel (width) multiplication factor stem_chs (int): stem width (default: 64) stem_type (str): stem type (default: '' == 7x7) avg_down (bool): average pooling in residual downsampling (default: False) preact (bool): pre-activiation (default: True) act_layer (Union[str, nn.Module]): activation layer norm_layer (Union[str, nn.Module]): normalization layer conv_layer (nn.Module): convolution module drop_rate: classifier dropout rate (default: 0.) drop_path_rate: stochastic depth rate (default: 0.) zero_init_last: zero-init last weight in residual path (default: False) """ super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate wf = width_factor norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) act_layer = get_act_layer(act_layer) self.feature_info = [] stem_chs = make_divisible(stem_chs * wf) self.stem = create_resnetv2_stem( in_chans, stem_chs, stem_type, preact, conv_layer=conv_layer, norm_layer=norm_layer, ) stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm' self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat)) prev_chs = stem_chs curr_stride = 4 dilation = 1 block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] block_fn = PreActBottleneck if preact else Bottleneck self.stages = nn.Sequential() for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)): out_chs = make_divisible(c * wf) stride = 1 if stage_idx == 0 else 2 if curr_stride >= output_stride: dilation *= stride stride = 1 stage = ResNetStage( prev_chs, out_chs, stride=stride, dilation=dilation, depth=d, avg_down=avg_down, act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer, block_dpr=bdpr, block_fn=block_fn, ) prev_chs = out_chs curr_stride *= stride self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')] self.stages.add_module(str(stage_idx), stage) self.num_features = prev_chs self.norm = norm_layer(self.num_features) if preact else nn.Identity() self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True, ) self.init_weights(zero_init_last=zero_init_last) self.grad_checkpointing = False @torch.jit.ignore def init_weights(self, zero_init_last=True): named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) @torch.jit.ignore() def load_pretrained(self, checkpoint_path, prefix='resnet/'): _load_weights(self, checkpoint_path, prefix) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm', (99999,)) ] ) return matcher 
@torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x, flatten=True) else: x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module: nn.Module, name: str = '', zero_init_last=True): if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)): nn.init.normal_(module.weight, mean=0.0, std=0.01) nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif zero_init_last and hasattr(module, 'zero_init_last'): module.zero_init_last() @torch.no_grad() def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'): import numpy as np def t2p(conv_weights): """Possibly convert HWIO to OIHW.""" if conv_weights.ndim == 4: conv_weights = conv_weights.transpose([3, 2, 0, 1]) return torch.from_numpy(conv_weights) weights = np.load(checkpoint_path) stem_conv_w = adapt_input_conv( model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) model.stem.conv.weight.copy_(stem_conv_w) model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma'])) model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta'])) if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \ model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]: model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel'])) model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias'])) for i, (sname, stage) in enumerate(model.stages.named_children()): for j, (bname, block) in enumerate(stage.blocks.named_children()): cname = 'standardized_conv2d' block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/' block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel'])) block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel'])) block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel'])) block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma'])) block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma'])) block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma'])) block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta'])) block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta'])) block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta'])) if block.downsample is not None: w = weights[f'{block_prefix}a/proj/{cname}/kernel'] block.downsample.conv.weight.copy_(t2p(w)) def _create_resnetv2(variant, pretrained=False, **kwargs): feature_cfg = dict(flatten_sequential=True) return build_model_with_cfg( ResNetV2, variant, pretrained, feature_cfg=feature_cfg, **kwargs, ) def _create_resnetv2_bit(variant, pretrained=False, **kwargs): return _create_resnetv2( variant, 
pretrained=pretrained, stem_type='fixed', conv_layer=partial(StdConv2d, eps=1e-8), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ # Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 'resnetv2_50x1_bit.goog_distilled_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', custom_load=True), 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', custom_load=True), 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic', custom_load=True), # pretrained on imagenet21k, finetuned on imagenet1k 'resnetv2_50x1_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_50x3_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_101x1_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_101x3_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_152x2_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_152x4_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0, custom_load=True), # only one at 480x480? 
# trained on imagenet-21k 'resnetv2_50x1_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_50x3_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_101x1_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_101x3_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_152x2_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_152x4_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_50.a1h_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnetv2_50d.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), 'resnetv2_50t.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), 'resnetv2_101.a1h_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnetv2_101d.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), 'resnetv2_152.untrained': _cfg( interpolation='bicubic'), 'resnetv2_152d.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), 'resnetv2_50d_gn.ah_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', first_conv='stem.conv1', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnetv2_50d_evos.ah_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', first_conv='stem.conv1', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnetv2_50d_frn.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), }) @register_model def resnetv2_50x1_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_50x1_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) @register_model def resnetv2_50x3_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_50x3_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs) @register_model def resnetv2_101x1_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_101x1_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs) @register_model def resnetv2_101x3_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_101x3_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs) @register_model def resnetv2_152x2_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_152x2_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) @register_model def resnetv2_152x4_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_152x4_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs) @register_model def resnetv2_50(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) return _create_resnetv2('resnetv2_50', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50t(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( 
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='tiered', avg_down=True) return _create_resnetv2('resnetv2_50t', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_101(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) return _create_resnetv2('resnetv2_101', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_101d(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_101d', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_152(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) return _create_resnetv2('resnetv2_152', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_152d(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_152d', pretrained=pretrained, **dict(model_args, **kwargs)) # Experimental configs (may change / be removed) @register_model def resnetv2_50d_gn(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d_gn', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d_evos(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNorm2dS0, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d_evos', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d_frn(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=FilterResponseNormTlu2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d_frn', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, { 'resnetv2_50x1_bitm': 'resnetv2_50x1_bit.goog_in21k_ft_in1k', 'resnetv2_50x3_bitm': 'resnetv2_50x3_bit.goog_in21k_ft_in1k', 'resnetv2_101x1_bitm': 'resnetv2_101x1_bit.goog_in21k_ft_in1k', 'resnetv2_101x3_bitm': 'resnetv2_101x3_bit.goog_in21k_ft_in1k', 'resnetv2_152x2_bitm': 'resnetv2_152x2_bit.goog_in21k_ft_in1k', 'resnetv2_152x4_bitm': 'resnetv2_152x4_bit.goog_in21k_ft_in1k', 'resnetv2_50x1_bitm_in21k': 'resnetv2_50x1_bit.goog_in21k', 'resnetv2_50x3_bitm_in21k': 'resnetv2_50x3_bit.goog_in21k', 'resnetv2_101x1_bitm_in21k': 'resnetv2_101x1_bit.goog_in21k', 'resnetv2_101x3_bitm_in21k': 'resnetv2_101x3_bit.goog_in21k', 'resnetv2_152x2_bitm_in21k': 'resnetv2_152x2_bit.goog_in21k', 'resnetv2_152x4_bitm_in21k': 'resnetv2_152x4_bit.goog_in21k', 'resnetv2_50x1_bit_distilled': 'resnetv2_50x1_bit.goog_distilled_in1k', 'resnetv2_152x2_bit_teacher': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k', 'resnetv2_152x2_bit_teacher_384': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384', })
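# --- Illustrative usage sketch (editor's addition, not part of resnetv2.py) ---
# Because _create_resnetv2 sets feature_cfg=dict(flatten_sequential=True), the
# registered variants work with timm's features_only mode; this sketch pulls the
# per-stage feature maps described by feature_info. Assumes timm is installed.
import torch
import timm

backbone = timm.create_model('resnetv2_50', pretrained=False, features_only=True)
feats = backbone(torch.randn(1, 3, 224, 224))
for f, r in zip(feats, backbone.feature_info.reduction()):
    print(f.shape, r)  # feature maps at strides 2, 4, 8, 16, 32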
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/rexnet.py
""" ReXNet A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` - https://arxiv.org/abs/2007.00992 Adapted from original impl at https://github.com/clovaai/rexnet Copyright (c) 2020-present NAVER Corp. MIT license Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman Copyright 2020 Ross Wightman """ from functools import partial from math import ceil import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead, create_act_layer, ConvNormAct, DropPath, make_divisible, SEModule from ._builder import build_model_with_cfg from ._efficientnet_builder import efficientnet_init_weights from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['RexNet'] # model_registry will add each entrypoint fn to this SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d) class LinearBottleneck(nn.Module): def __init__( self, in_chs, out_chs, stride, dilation=(1, 1), exp_ratio=1.0, se_ratio=0., ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_path=None, ): super(LinearBottleneck, self).__init__() self.use_shortcut = stride == 1 and dilation[0] == dilation[1] and in_chs <= out_chs self.in_channels = in_chs self.out_channels = out_chs if exp_ratio != 1.: dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div) self.conv_exp = ConvNormAct(in_chs, dw_chs, act_layer=act_layer) else: dw_chs = in_chs self.conv_exp = None self.conv_dw = ConvNormAct( dw_chs, dw_chs, kernel_size=3, stride=stride, dilation=dilation[0], groups=dw_chs, apply_act=False, ) if se_ratio > 0: self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div)) else: self.se = None self.act_dw = create_act_layer(dw_act_layer) self.conv_pwl = ConvNormAct(dw_chs, out_chs, 1, apply_act=False) self.drop_path = drop_path def feat_channels(self, exp=False): return self.conv_dw.out_channels if exp else self.out_channels def forward(self, x): shortcut = x if self.conv_exp is not None: x = self.conv_exp(x) x = self.conv_dw(x) if self.se is not None: x = self.se(x) x = self.act_dw(x) x = self.conv_pwl(x) if self.use_shortcut: if self.drop_path is not None: x = self.drop_path(x) x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1) return x def _block_cfg( width_mult=1.0, depth_mult=1.0, initial_chs=16, final_chs=180, se_ratio=0., ch_div=1, ): layers = [1, 2, 2, 3, 3, 5] strides = [1, 2, 2, 2, 1, 2] layers = [ceil(element * depth_mult) for element in layers] strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], []) exp_ratios = [1] * layers[0] + [6] * sum(layers[1:]) depth = sum(layers[:]) * 3 base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs # The following channel configuration is a simple instance to make each layer become an expand layer. out_chs_list = [] for i in range(depth // 3): out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div)) base_chs += final_chs / (depth // 3 * 1.0) se_ratios = [0.] 
* (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:]) return list(zip(out_chs_list, exp_ratios, strides, se_ratios)) def _build_blocks( block_cfg, prev_chs, width_mult, ch_div=1, output_stride=32, act_layer='swish', dw_act_layer='relu6', drop_path_rate=0., ): feat_chs = [prev_chs] feature_info = [] curr_stride = 2 dilation = 1 features = [] num_blocks = len(block_cfg) for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg): next_dilation = dilation if stride > 1: fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}' feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)] if curr_stride >= output_stride: next_dilation = dilation * stride stride = 1 block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule drop_path = DropPath(block_dpr) if block_dpr > 0. else None features.append(LinearBottleneck( in_chs=prev_chs, out_chs=chs, exp_ratio=exp_ratio, stride=stride, dilation=(dilation, next_dilation), se_ratio=se_ratio, ch_div=ch_div, act_layer=act_layer, dw_act_layer=dw_act_layer, drop_path=drop_path, )) curr_stride *= stride dilation = next_dilation prev_chs = chs feat_chs += [features[-1].feat_channels()] pen_chs = make_divisible(1280 * width_mult, divisor=ch_div) feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')] features.append(ConvNormAct(prev_chs, pen_chs, act_layer=act_layer)) return features, feature_info class RexNet(nn.Module): def __init__( self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, initial_chs=16, final_chs=180, width_mult=1.0, depth_mult=1.0, se_ratio=1/12., ch_div=1, act_layer='swish', dw_act_layer='relu6', drop_rate=0.2, drop_path_rate=0., ): super(RexNet, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False assert output_stride in (32, 16, 8) stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32 stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div) self.stem = ConvNormAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer) block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div) features, self.feature_info = _build_blocks( block_cfg, stem_chs, width_mult, ch_div, output_stride, act_layer, dw_act_layer, drop_path_rate, ) self.num_features = features[-1].out_channels self.features = nn.Sequential(*features) self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate) efficientnet_init_weights(self) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=r'^features\.(\d+)', ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.features, x, flatten=True) else: x = self.features(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_rexnet(variant, pretrained, **kwargs): feature_cfg = dict(flatten_sequential=True) return 
build_model_with_cfg( RexNet, variant, pretrained, feature_cfg=feature_cfg, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ 'rexnet_100.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_130.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_150.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_200.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnet_300.nav_in1k': _cfg(hf_hub_id='timm/'), 'rexnetr_100.untrained': _cfg(), 'rexnetr_130.untrained': _cfg(), 'rexnetr_150.untrained': _cfg(), 'rexnetr_200.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), 'rexnetr_300.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), 'rexnetr_200.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), 'rexnetr_300.sw_in12k': _cfg( hf_hub_id='timm/', num_classes=11821, crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'), }) @register_model def rexnet_100(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 1.0x""" return _create_rexnet('rexnet_100', pretrained, **kwargs) @register_model def rexnet_130(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 1.3x""" return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs) @register_model def rexnet_150(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 1.5x""" return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs) @register_model def rexnet_200(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 2.0x""" return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs) @register_model def rexnet_300(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 3.0x""" return _create_rexnet('rexnet_300', pretrained, width_mult=3.0, **kwargs) @register_model def rexnetr_100(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 1.0x w/ rounded (mod 8) channels""" return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs) @register_model def rexnetr_130(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 1.3x w/ rounded (mod 8) channels""" return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs) @register_model def rexnetr_150(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 1.5x w/ rounded (mod 8) channels""" return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs) @register_model def rexnetr_200(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 2.0x w/ rounded (mod 8) channels""" return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs) @register_model def rexnetr_300(pretrained=False, **kwargs) -> RexNet: """ReXNet V1 3.0x w/ rounded (mod 16) channels""" return _create_rexnet('rexnetr_300', pretrained, width_mult=3.0, ch_div=16, **kwargs)
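# --- Illustrative usage sketch (editor's addition, not part of rexnet.py) ---
# ReXNet's LinearBottleneck adds the shortcut only to the first in_channels
# channels of its output (a partial residual), which is why use_shortcut also
# requires in_chs <= out_chs. This sketch exercises a model end to end and pulls
# the pooled pre-logits embedding. Assumes timm is installed.
import torch
import timm

model = timm.create_model('rexnet_100', pretrained=False)
x = torch.randn(2, 3, 224, 224)
pooled = model.forward_head(model.forward_features(x), pre_logits=True)
print(pooled.shape)  # (2, 1280) for rexnet_100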
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/selecsls.py
"""PyTorch SelecSLS Net example for ImageNet Classification License: CC BY 4.0 (https://creativecommons.org/licenses/by/4.0/legalcode) Author: Dushyant Mehta (@mehtadushy) SelecSLS (core) Network Architecture as proposed in "XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera, Mehta et al." https://arxiv.org/abs/1907.00837 Based on ResNet implementation in https://github.com/rwightman/pytorch-image-models and SelecSLS Net implementation in https://github.com/mehtadushy/SelecSLS-Pytorch """ from typing import List import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SelecSls'] # model_registry will add each entrypoint fn to this class SequentialList(nn.Sequential): def __init__(self, *args): super(SequentialList, self).__init__(*args) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (List[torch.Tensor]) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (List[torch.Tensor]) pass def forward(self, x) -> List[torch.Tensor]: for module in self: x = module(x) return x class SelectSeq(nn.Module): def __init__(self, mode='index', index=0): super(SelectSeq, self).__init__() self.mode = mode self.index = index @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (Tuple[torch.Tensor]) -> (torch.Tensor) pass def forward(self, x) -> torch.Tensor: if self.mode == 'index': return x[self.index] else: return torch.cat(x, dim=1) def conv_bn(in_chs, out_chs, k=3, stride=1, padding=None, dilation=1): if padding is None: padding = ((stride - 1) + dilation * (k - 1)) // 2 return nn.Sequential( nn.Conv2d(in_chs, out_chs, k, stride, padding=padding, dilation=dilation, bias=False), nn.BatchNorm2d(out_chs), nn.ReLU(inplace=True) ) class SelecSlsBlock(nn.Module): def __init__(self, in_chs, skip_chs, mid_chs, out_chs, is_first, stride, dilation=1): super(SelecSlsBlock, self).__init__() self.stride = stride self.is_first = is_first assert stride in [1, 2] # Process input with 4 conv blocks with the same number of input and output channels self.conv1 = conv_bn(in_chs, mid_chs, 3, stride, dilation=dilation) self.conv2 = conv_bn(mid_chs, mid_chs, 1) self.conv3 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv4 = conv_bn(mid_chs // 2, mid_chs, 1) self.conv5 = conv_bn(mid_chs, mid_chs // 2, 3) self.conv6 = conv_bn(2 * mid_chs + (0 if is_first else skip_chs), out_chs, 1) def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: if not isinstance(x, list): x = [x] assert len(x) in [1, 2] d1 = self.conv1(x[0]) d2 = self.conv3(self.conv2(d1)) d3 = self.conv5(self.conv4(d2)) if self.is_first: out = self.conv6(torch.cat([d1, d2, d3], 1)) return [out, out] else: return [self.conv6(torch.cat([d1, d2, d3, x[1]], 1)), x[1]] class SelecSls(nn.Module): """SelecSls42 / SelecSls60 / SelecSls84 Parameters ---------- cfg : network config dictionary specifying block type, feature, and head args num_classes : int, default 1000 Number of classification classes. in_chans : int, default 3 Number of input (color) channels. drop_rate : float, default 0. Dropout probability before classifier, for training global_pool : str, default 'avg' Global pooling type. 
One of 'avg', 'max', 'avgmax', 'catavgmax' """ def __init__(self, cfg, num_classes=1000, in_chans=3, drop_rate=0.0, global_pool='avg'): self.num_classes = num_classes super(SelecSls, self).__init__() self.stem = conv_bn(in_chans, 32, stride=2) self.features = SequentialList(*[cfg['block'](*block_args) for block_args in cfg['features']]) self.from_seq = SelectSeq() # from List[tensor] -> Tensor in module compatible way self.head = nn.Sequential(*[conv_bn(*conv_args) for conv_args in cfg['head']]) self.num_features = cfg['num_features'] self.feature_info = cfg['feature_info'] self.global_pool, self.head_drop, self.fc = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate, ) for n, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^features\.(\d+)', blocks_head=r'^head' ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.features(x) x = self.head(self.from_seq(x)) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.fc(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_selecsls(variant, pretrained, **kwargs): cfg = {} feature_info = [dict(num_chs=32, reduction=2, module='stem.2')] if variant.startswith('selecsls42'): cfg['block'] = SelecSlsBlock # Define configuration of the network after the initial neck cfg['features'] = [ # in_chs, skip_chs, mid_chs, out_chs, is_first, stride (32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 144, 144, True, 2), (144, 144, 144, 288, False, 1), (288, 0, 304, 304, True, 2), (304, 304, 304, 480, False, 1), ] feature_info.extend([ dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, reduction=8, module='features.3'), dict(num_chs=480, reduction=16, module='features.5'), ]) # Head can be replaced with alternative configurations depending on the problem feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) if variant == 'selecsls42b': cfg['head'] = [ (480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1), ] feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) cfg['num_features'] = 1024 else: cfg['head'] = [ (480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1), ] feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) cfg['num_features'] = 1280 elif variant.startswith('selecsls60'): cfg['block'] = SelecSlsBlock # Define configuration of the network after the initial neck cfg['features'] = [ # in_chs, skip_chs, mid_chs, out_chs, is_first, stride (32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 128, 128, True, 2), (128, 128, 128, 128, False, 1), (128, 128, 128, 288, False, 1), (288, 0, 288, 288, True, 2), (288, 288, 288, 288, False, 1), (288, 288, 288, 288, False, 1), (288, 288, 288, 416, False, 1), ] feature_info.extend([ dict(num_chs=128, reduction=4, module='features.1'), dict(num_chs=288, 
reduction=8, module='features.4'), dict(num_chs=416, reduction=16, module='features.8'), ]) # Head can be replaced with alternative configurations depending on the problem feature_info.append(dict(num_chs=1024, reduction=32, module='head.1')) if variant == 'selecsls60b': cfg['head'] = [ (416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1), ] feature_info.append(dict(num_chs=1024, reduction=64, module='head.3')) cfg['num_features'] = 1024 else: cfg['head'] = [ (416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1), ] feature_info.append(dict(num_chs=1280, reduction=64, module='head.3')) cfg['num_features'] = 1280 elif variant == 'selecsls84': cfg['block'] = SelecSlsBlock # Define configuration of the network after the initial neck cfg['features'] = [ # in_chs, skip_chs, mid_chs, out_chs, is_first, stride (32, 0, 64, 64, True, 2), (64, 64, 64, 144, False, 1), (144, 0, 144, 144, True, 2), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 304, False, 1), (304, 0, 304, 304, True, 2), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 512, False, 1), ] feature_info.extend([ dict(num_chs=144, reduction=4, module='features.1'), dict(num_chs=304, reduction=8, module='features.6'), dict(num_chs=512, reduction=16, module='features.12'), ]) # Head can be replaced with alternative configurations depending on the problem cfg['head'] = [ (512, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 3, 1), ] cfg['num_features'] = 1280 feature_info.extend([ dict(num_chs=1024, reduction=32, module='head.1'), dict(num_chs=1280, reduction=64, module='head.3') ]) else: raise ValueError('Invalid net configuration ' + variant + ' !!!') cfg['feature_info'] = feature_info # this model can do 6 feature levels by default, unlike most others, leave as 0-4 to avoid surprises? return build_model_with_cfg( SelecSls, variant, pretrained, model_cfg=cfg, feature_cfg=dict(out_indices=(0, 1, 2, 3, 4), flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (4, 4), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'selecsls42.untrained': _cfg( interpolation='bicubic'), 'selecsls42b.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'selecsls60.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'selecsls60b.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'selecsls84.untrained': _cfg( interpolation='bicubic'), }) @register_model def selecsls42(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls42 model. """ return _create_selecsls('selecsls42', pretrained, **kwargs) @register_model def selecsls42b(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls42_B model. """ return _create_selecsls('selecsls42b', pretrained, **kwargs) @register_model def selecsls60(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls60 model. """ return _create_selecsls('selecsls60', pretrained, **kwargs) @register_model def selecsls60b(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls60_B model. 
""" return _create_selecsls('selecsls60b', pretrained, **kwargs) @register_model def selecsls84(pretrained=False, **kwargs) -> SelecSls: """Constructs a SelecSls84 model. """ return _create_selecsls('selecsls84', pretrained, **kwargs)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/senet.py
""" SEResNet implementation from Cadene's pretrained models https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py Additional credit to https://github.com/creafz Original model: https://github.com/hujie-frank/SENet ResNet code gently borrowed from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here. """ import math from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['SENet'] def _weight_init(m): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1.) nn.init.constant_(m.bias, 0.) class SEModule(nn.Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = x.mean((2, 3), keepdim=True) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class Bottleneck(nn.Module): """ Base class for bottlenecks that implements `forward()` method. """ def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SEBottleneck(Bottleneck): """ Bottleneck for SENet154. """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes * 2) self.conv2 = nn.Conv2d( planes * 2, planes * 4, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes * 4) self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBottleneck(Bottleneck): """ ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe implementation and uses `stride=stride` in `conv1` and not in `conv2` (the latter is used in the torchvision implementation of ResNet). 
""" expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNeXtBottleneck(Bottleneck): """ ResNeXt bottleneck type C with a Squeeze-and-Excitation module. """ expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4): super(SEResNeXtBottleneck, self).__init__() width = math.floor(planes * (base_width / 64)) * groups self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1) self.bn1 = nn.BatchNorm2d(width) self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(width) self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes, reduction=reduction) self.downsample = downsample self.stride = stride def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) if self.downsample is not None: shortcut = self.downsample(x) out = self.se_module(out) + shortcut out = self.relu(out) return out class SENet(nn.Module): def __init__( self, block, layers, groups, reduction, drop_rate=0.2, in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=1000, global_pool='avg'): """ Parameters ---------- block (nn.Module): Bottleneck class. - For SENet154: SEBottleneck - For SE-ResNet models: SEResNetBottleneck - For SE-ResNeXt models: SEResNeXtBottleneck layers (list of ints): Number of residual blocks for 4 layers of the network (layer1...layer4). groups (int): Number of groups for the 3x3 convolution in each bottleneck block. - For SENet154: 64 - For SE-ResNet models: 1 - For SE-ResNeXt models: 32 reduction (int): Reduction ratio for Squeeze-and-Excitation modules. - For all models: 16 dropout_p (float or None): Drop probability for the Dropout layer. If `None` the Dropout layer is not used. - For SENet154: 0.2 - For SE-ResNet models: None - For SE-ResNeXt models: None inplanes (int): Number of input channels for layer1. - For SENet154: 128 - For SE-ResNet models: 64 - For SE-ResNeXt models: 64 input_3x3 (bool): If `True`, use three 3x3 convolutions instead of a single 7x7 convolution in layer0. 
- For SENet154: True - For SE-ResNet models: False - For SE-ResNeXt models: False downsample_kernel_size (int): Kernel size for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 3 - For SE-ResNet models: 1 - For SE-ResNeXt models: 1 downsample_padding (int): Padding for downsampling convolutions in layer2, layer3 and layer4. - For SENet154: 1 - For SE-ResNet models: 0 - For SE-ResNeXt models: 0 num_classes (int): Number of outputs in `last_linear` layer. - For all models: 1000 """ super(SENet, self).__init__() self.inplanes = inplanes self.num_classes = num_classes self.drop_rate = drop_rate if input_3x3: layer0_modules = [ ('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)), ('bn1', nn.BatchNorm2d(64)), ('relu1', nn.ReLU(inplace=True)), ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)), ('bn2', nn.BatchNorm2d(64)), ('relu2', nn.ReLU(inplace=True)), ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)), ('bn3', nn.BatchNorm2d(inplanes)), ('relu3', nn.ReLU(inplace=True)), ] else: layer0_modules = [ ('conv1', nn.Conv2d( in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)), ('bn1', nn.BatchNorm2d(inplanes)), ('relu1', nn.ReLU(inplace=True)), ] self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) # To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`. self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True) self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')] self.layer1 = self._make_layer( block, planes=64, blocks=layers[0], groups=groups, reduction=reduction, downsample_kernel_size=1, downsample_padding=0 ) self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')] self.layer2 = self._make_layer( block, planes=128, blocks=layers[1], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')] self.layer3 = self._make_layer( block, planes=256, blocks=layers[2], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')] self.layer4 = self._make_layer( block, planes=512, blocks=layers[3], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')] self.num_features = 512 * block.expansion self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) for m in self.modules(): _weight_init(m) def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, downsample_kernel_size=1, downsample_padding=0): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, stride=stride, padding=downsample_padding, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)] self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, groups, reduction)) return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, 
coarse=False): matcher = dict(stem=r'^layer0', blocks=r'^layer(\d+)' if coarse else r'^layer(\d+)\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.last_linear def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.last_linear = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.layer0(x) x = self.pool0(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) return x if pre_logits else self.last_linear(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_senet(variant, pretrained=False, **kwargs): return build_model_with_cfg(SENet, variant, pretrained, **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'layer0.conv1', 'classifier': 'last_linear', **kwargs } default_cfgs = generate_default_cfgs({ 'legacy_senet154.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_senet154-e9eb9fe6.pth'), 'legacy_seresnet18.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth', interpolation='bicubic'), 'legacy_seresnet34.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'), 'legacy_seresnet50.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'), 'legacy_seresnet101.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'), 'legacy_seresnet152.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'), 'legacy_seresnext26_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth', interpolation='bicubic'), 'legacy_seresnext50_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext50_32x4d-f3651bad.pth'), 'legacy_seresnext101_32x4d.in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/legacy_se_resnext101_32x4d-37725eac.pth'), }) @register_model def legacy_seresnet18(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet18', pretrained, **model_args) @register_model def legacy_seresnet34(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet34', pretrained, **model_args) @register_model def legacy_seresnet50(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet50', 
pretrained, **model_args) @register_model def legacy_seresnet101(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet101', pretrained, **model_args) @register_model def legacy_seresnet152(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs) return _create_senet('legacy_seresnet152', pretrained, **model_args) @register_model def legacy_senet154(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16, downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs) return _create_senet('legacy_senet154', pretrained, **model_args) @register_model def legacy_seresnext26_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args) @register_model def legacy_seresnext50_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args) @register_model def legacy_seresnext101_32x4d(pretrained=False, **kwargs) -> SENet: model_args = dict( block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs) return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args)
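
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original senet.py); a minimal
# example assuming timm is installed and the entrypoints above are registered:
# build a legacy SE model via the registry and run a forward pass.
import torch
import timm

senet = timm.create_model('legacy_seresnext50_32x4d', pretrained=False, num_classes=10)
senet.eval()
img = torch.randn(1, 3, 224, 224)          # input size from the default cfg
with torch.no_grad():
    logits = senet(img)                    # (1, 10) after global pool + last_linear
    fmap = senet.forward_features(img)     # (1, 2048, 7, 7) pre-pooling feature map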
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/sequencer.py
""" Sequencer Paper: `Sequencer: Deep LSTM for Image Classification` - https://arxiv.org/pdf/2205.01972.pdf """ # Copyright (c) 2022. Yuki Tatsunami # Licensed under the Apache License, Version 2.0 (the "License"); import math from functools import partial from itertools import accumulate from typing import Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT from timm.layers import lecun_normal_, DropPath, Mlp, PatchEmbed, ClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply from ._registry import register_model, generate_default_cfgs __all__ = ['Sequencer2d'] # model_registry will add each entrypoint fn to this def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: if flax: # Flax defaults lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)): stdv = 1.0 / math.sqrt(module.hidden_size) for weight in module.parameters(): nn.init.uniform_(weight, -stdv, stdv) elif hasattr(module, 'init_weights'): module.init_weights() class RNNIdentity(nn.Module): def __init__(self, *args, **kwargs): super(RNNIdentity, self).__init__() def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]: return x, None class RNN2dBase(nn.Module): def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, bidirectional: bool = True, union="cat", with_fc=True, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = 2 * hidden_size if bidirectional else hidden_size self.union = union self.with_vertical = True self.with_horizontal = True self.with_fc = with_fc self.fc = None if with_fc: if union == "cat": self.fc = nn.Linear(2 * self.output_size, input_size) elif union == "add": self.fc = nn.Linear(self.output_size, input_size) elif union == "vertical": self.fc = nn.Linear(self.output_size, input_size) self.with_horizontal = False elif union == "horizontal": self.fc = nn.Linear(self.output_size, input_size) self.with_vertical = False else: raise ValueError("Unrecognized union: " + union) elif union == "cat": pass if 2 * self.output_size != input_size: raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.") elif union == "add": pass if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") elif union == "vertical": if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") self.with_horizontal = False elif union == "horizontal": if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") self.with_vertical = False else: raise ValueError("Unrecognized union: " + union) 
self.rnn_v = RNNIdentity() self.rnn_h = RNNIdentity() def forward(self, x): B, H, W, C = x.shape if self.with_vertical: v = x.permute(0, 2, 1, 3) v = v.reshape(-1, H, C) v, _ = self.rnn_v(v) v = v.reshape(B, W, H, -1) v = v.permute(0, 2, 1, 3) else: v = None if self.with_horizontal: h = x.reshape(-1, W, C) h, _ = self.rnn_h(h) h = h.reshape(B, H, W, -1) else: h = None if v is not None and h is not None: if self.union == "cat": x = torch.cat([v, h], dim=-1) else: x = v + h elif v is not None: x = v elif h is not None: x = h if self.fc is not None: x = self.fc(x) return x class LSTM2d(RNN2dBase): def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, bidirectional: bool = True, union="cat", with_fc=True, ): super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) if self.with_vertical: self.rnn_v = nn.LSTM( input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, ) if self.with_horizontal: self.rnn_h = nn.LSTM( input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, ) class Sequencer2dBlock(nn.Module): def __init__( self, dim, hidden_size, mlp_ratio=3.0, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_layers=1, bidirectional=True, union="cat", with_fc=True, drop=0., drop_path=0., ): super().__init__() channels_dim = int(mlp_ratio * dim) self.norm1 = norm_layer(dim) self.rnn_tokens = rnn_layer( dim, hidden_size, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.rnn_tokens(self.norm1(x))) x = x + self.drop_path(self.mlp_channels(self.norm2(x))) return x class Shuffle(nn.Module): def __init__(self): super().__init__() def forward(self, x): if self.training: B, H, W, C = x.shape r = torch.randperm(H * W) x = x.reshape(B, -1, C) x = x[:, r, :].reshape(B, H, W, -1) return x class Downsample2d(nn.Module): def __init__(self, input_dim, output_dim, patch_size): super().__init__() self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.down(x) x = x.permute(0, 2, 3, 1) return x class Sequencer2dStage(nn.Module): def __init__( self, dim, dim_out, depth, patch_size, hidden_size, mlp_ratio, downsample=False, block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_layers=1, bidirectional=True, union="cat", with_fc=True, drop=0., drop_path=0., ): super().__init__() if downsample: self.downsample = Downsample2d(dim, dim_out, patch_size) else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] for block_idx in range(depth): blocks.append(block_layer( dim_out, hidden_size, mlp_ratio=mlp_ratio, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop, drop_path=drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path, )) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class Sequencer2d(nn.Module): def __init__( self, num_classes=1000, img_size=224, in_chans=3, 
global_pool='avg', layers=(4, 3, 8, 3), patch_sizes=(7, 2, 2, 1), embed_dims=(192, 384, 384, 384), hidden_sizes=(48, 96, 96, 96), mlp_ratios=(3.0, 3.0, 3.0, 3.0), block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_rnn_layers=1, bidirectional=True, union="cat", with_fc=True, drop_rate=0., drop_path_rate=0., nlhb=False, stem_norm=False, ): super().__init__() assert global_pool in ('', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.num_features = embed_dims[-1] # num_features for consistency with other models self.feature_dim = -1 # channel dim index for feature outputs (rank 4, NHWC) self.output_fmt = 'NHWC' self.feature_info = [] self.stem = PatchEmbed( img_size=None, patch_size=patch_sizes[0], in_chans=in_chans, embed_dim=embed_dims[0], norm_layer=norm_layer if stem_norm else None, flatten=False, output_fmt='NHWC', ) assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios) reductions = list(accumulate(patch_sizes, lambda x, y: x * y)) stages = [] prev_dim = embed_dims[0] for i, _ in enumerate(embed_dims): stages += [Sequencer2dStage( prev_dim, embed_dims[i], depth=layers[i], downsample=i > 0, patch_size=patch_sizes[i], hidden_size=hidden_sizes[i], mlp_ratio=mlp_ratios[i], block_layer=block_layer, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_rnn_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop_rate, drop_path=drop_path_rate, )] prev_dim = embed_dims[i] self.feature_info += [dict(num_chs=prev_dim, reduction=reductions[i], module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.norm = norm_layer(embed_dims[-1]) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt, ) self.init_weights(nlhb=nlhb) def init_weights(self, nlhb=False): head_bias = -math.log(self.num_classes) if nlhb else 0. 
named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=[ (r'^stages\.(\d+)', None), (r'^norm', (99999,)) ] if coarse else [ (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^stages\.(\d+)\.downsample', (0,)), (r'^norm', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ Remap original checkpoints -> timm """ if 'stages.0.blocks.0.norm1.weight' in state_dict: return state_dict # already translated checkpoint if 'model' in state_dict: state_dict = state_dict['model'] import re out_dict = {} for k, v in state_dict.items(): k = re.sub(r'blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k) k = re.sub(r'blocks.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_sequencer2d(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(3)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( Sequencer2d, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.proj', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'sequencer2d_s.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_m.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_l.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def sequencer2d_s(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[4, 3, 8, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, ) model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_m(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[4, 3, 14, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, **kwargs) model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_l(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[8, 8, 16, 4], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, **kwargs) 
    model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **dict(model_args, **kwargs))
    return model
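
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original sequencer.py), assuming
# timm is installed. Sequencer2d keeps activations in NHWC layout, so
# forward_features returns (B, H, W, C) rather than the usual (B, C, H, W).
import torch
import timm

seq = timm.create_model('sequencer2d_s', pretrained=False)
seq.eval()
imgs = torch.randn(2, 3, 224, 224)         # fixed_input_size=True in the default cfg
with torch.no_grad():
    feats = seq.forward_features(imgs)     # (2, 16, 16, 384): 7x patch stem, then one 2x downsample
    logits = seq(imgs)                     # (2, 1000)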
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/sknet.py
""" Selective Kernel Networks (ResNet base) Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) This was inspired by reading 'Compounding the Performance Improvements...' (https://arxiv.org/abs/2001.06268) and a streamlined impl at https://github.com/clovaai/assembled-cnn but I ended up building something closer to the original paper with some modifications of my own to better balance param count vs accuracy. Hacked together by / Copyright 2020 Ross Wightman """ import math from torch import nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectiveKernel, ConvNormAct, create_attn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet class SelectiveKernelBasic(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(SelectiveKernelBasic, self).__init__() sk_kwargs = sk_kwargs or {} conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) assert cardinality == 1, 'BasicBlock only supports cardinality of 1' assert base_width == 64, 'BasicBlock doest not support changing base width' first_planes = planes // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = SelectiveKernel( inplanes, first_planes, stride=stride, dilation=first_dilation, aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) self.conv2 = ConvNormAct( first_planes, outplanes, kernel_size=3, dilation=dilation, apply_act=False, **conv_kwargs) self.se = create_attn(attn_layer, outplanes) self.act = act_layer(inplace=True) self.downsample = downsample self.drop_path = drop_path def zero_init_last(self): if getattr(self.conv2.bn, 'weight', None) is not None: nn.init.zeros_(self.conv2.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act(x) return x class SelectiveKernelBottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(SelectiveKernelBottleneck, self).__init__() sk_kwargs = sk_kwargs or {} conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer) width = int(math.floor(planes * (base_width / 64)) * cardinality) first_planes = width // reduce_first outplanes = planes * self.expansion first_dilation = first_dilation or dilation self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs) self.conv2 = SelectiveKernel( first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs) self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs) self.se = create_attn(attn_layer, outplanes) self.act = act_layer(inplace=True) self.downsample = downsample self.drop_path = drop_path def zero_init_last(self): if getattr(self.conv3.bn, 'weight', None) is not None: 
nn.init.zeros_(self.conv3.bn.weight) def forward(self, x): shortcut = x x = self.conv1(x) x = self.conv2(x) x = self.conv3(x) if self.se is not None: x = self.se(x) if self.drop_path is not None: x = self.drop_path(x) if self.downsample is not None: shortcut = self.downsample(shortcut) x += shortcut x = self.act(x) return x def _create_skresnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( ResNet, variant, pretrained, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 'skresnet18.ra_in1k': _cfg(hf_hub_id='timm/'), 'skresnet34.ra_in1k': _cfg(hf_hub_id='timm/'), 'skresnet50.untrained': _cfg(), 'skresnet50d.untrained': _cfg( first_conv='conv1.0'), 'skresnext50_32x4d.ra_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def skresnet18(pretrained=False, **kwargs) -> ResNet: """Constructs a Selective Kernel ResNet-18 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) model_args = dict( block=SelectiveKernelBasic, layers=[2, 2, 2, 2], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet18', pretrained, **model_args) @register_model def skresnet34(pretrained=False, **kwargs) -> ResNet: """Constructs a Selective Kernel ResNet-34 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(rd_ratio=1 / 8, rd_divisor=16, split_input=True) model_args = dict( block=SelectiveKernelBasic, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet34', pretrained, **model_args) @register_model def skresnet50(pretrained=False, **kwargs) -> ResNet: """Constructs a Select Kernel ResNet-50 model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(split_input=True) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet50', pretrained, **model_args) @register_model def skresnet50d(pretrained=False, **kwargs) -> ResNet: """Constructs a Select Kernel ResNet-50-D model. Different from configs in Select Kernel paper or "Compounding the Performance Improvements..." this variation splits the input channels to the selective convolutions to keep param count down. """ sk_kwargs = dict(split_input=True) model_args = dict( block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs) return _create_skresnet('skresnet50d', pretrained, **model_args) @register_model def skresnext50_32x4d(pretrained=False, **kwargs) -> ResNet: """Constructs a Select Kernel ResNeXt50-32x4d model. 
    This should be equivalent to the SKNet-50 model in the Select Kernel Paper
    """
    sk_kwargs = dict(rd_ratio=1/16, rd_divisor=32, split_input=False)
    model_args = dict(
        block=SelectiveKernelBottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
        block_args=dict(sk_kwargs=sk_kwargs), zero_init_last=False, **kwargs)
    return _create_skresnet('skresnext50_32x4d', pretrained, **model_args)
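
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original sknet.py), assuming timm
# is installed. SK models reuse the timm ResNet trunk, so the standard
# classification and features_only APIs both apply.
import torch
import timm

sk = timm.create_model('skresnet34', pretrained=False)
logits = sk(torch.randn(1, 3, 224, 224))   # (1, 1000)

# The same trunk as a multi-scale feature extractor.
sk_feats = timm.create_model('skresnet34', pretrained=False, features_only=True)
print(sk_feats.feature_info.channels())    # per-stage channel counts, e.g. [64, 64, 128, 256, 512]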
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/swin_transformer.py
""" Swin Transformer A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below S3 (AutoFormerV2, https://arxiv.org/abs/2111.14725) Swin weights from - https://github.com/microsoft/Cream/tree/main/AutoFormerV2 Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # -------------------------------------------------------- # Swin Transformer # Copyright (c) 2021 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ze Liu # -------------------------------------------------------- import logging import math from typing import Callable, List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, ClassifierHead, to_2tuple, to_ntuple, trunc_normal_, \ _assert, use_fused_attn, resize_rel_pos_bias_table from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq, named_apply from ._registry import generate_default_cfgs, register_model, register_model_deprecations from .vision_transformer import get_init_weights_vit __all__ = ['SwinTransformer'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) _int_or_tuple_2_t = Union[int, Tuple[int, int]] def window_partition( x: torch.Tensor, window_size: Tuple[int, int], ) -> torch.Tensor: """ Partition into non-overlapping windows with padding if needed. Args: x (tensor): input tokens with [B, H, W, C]. window_size (int): window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. (Hp, Wp): padded height and width before partition """ B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows, window_size: Tuple[int, int], H: int, W: int): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x def get_relative_position_index(win_h: int, win_w: int): # get pair-wise relative position index for each token inside the window coords = torch.stack(torch.meshgrid([torch.arange(win_h), torch.arange(win_w)])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += win_h - 1 # shift to start from 0 relative_coords[:, :, 1] += win_w - 1 relative_coords[:, :, 0] *= 2 * win_w - 1 return relative_coords.sum(-1) # Wh*Ww, Wh*Ww class WindowAttention(nn.Module): """ Window based multi-head self attention (W-MSA) module with relative position bias. It supports shifted and non-shifted windows. 
""" fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, num_heads: int, head_dim: Optional[int] = None, window_size: _int_or_tuple_2_t = 7, qkv_bias: bool = True, attn_drop: float = 0., proj_drop: float = 0., ): """ Args: dim: Number of input channels. num_heads: Number of attention heads. head_dim: Number of channels per head (dim // num_heads if not set) window_size: The height and width of the window. qkv_bias: If True, add a learnable bias to query, key, value. attn_drop: Dropout ratio of attention weight. proj_drop: Dropout ratio of output. """ super().__init__() self.dim = dim self.window_size = to_2tuple(window_size) # Wh, Ww win_h, win_w = self.window_size self.window_area = win_h * win_w self.num_heads = num_heads head_dim = head_dim or dim // num_heads attn_dim = head_dim * num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn(experimental=True) # NOTE not tested for prime-time yet # define a parameter table of relative position bias, shape: 2*Wh-1 * 2*Ww-1, nH self.relative_position_bias_table = nn.Parameter(torch.zeros((2 * win_h - 1) * (2 * win_w - 1), num_heads)) # get pair-wise relative position index for each token inside the window self.register_buffer("relative_position_index", get_relative_position_index(win_h, win_w), persistent=False) self.qkv = nn.Linear(dim, attn_dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(attn_dim, dim) self.proj_drop = nn.Dropout(proj_drop) trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def _get_rel_pos_bias(self) -> torch.Tensor: relative_position_bias = self.relative_position_bias_table[ self.relative_position_index.view(-1)].view(self.window_area, self.window_area, -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww return relative_position_bias.unsqueeze(0) def forward(self, x, mask: Optional[torch.Tensor] = None): """ Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None """ B_, N, C = x.shape qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) if self.fused_attn: attn_mask = self._get_rel_pos_bias() if mask is not None: num_win = mask.shape[0] mask = mask.view(1, num_win, 1, N, N).expand(B_ // num_win, -1, self.num_heads, -1, -1) attn_mask = attn_mask + mask.reshape(-1, self.num_heads, N, N) x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_mask, dropout_p=self.attn_drop.p, ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn + self._get_rel_pos_bias() if mask is not None: num_win = mask.shape[0] attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B_, N, -1) x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerBlock(nn.Module): """ Swin Transformer Block. """ def __init__( self, dim: int, input_resolution: _int_or_tuple_2_t, num_heads: int = 4, head_dim: Optional[int] = None, window_size: _int_or_tuple_2_t = 7, shift_size: int = 0, mlp_ratio: float = 4., qkv_bias: bool = True, proj_drop: float = 0., attn_drop: float = 0., drop_path: float = 0., act_layer: Callable = nn.GELU, norm_layer: Callable = nn.LayerNorm, ): """ Args: dim: Number of input channels. input_resolution: Input resolution. 
window_size: Window size. num_heads: Number of attention heads. head_dim: Enforce the number of channels per head shift_size: Shift size for SW-MSA. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. proj_drop: Dropout rate. attn_drop: Attention dropout rate. drop_path: Stochastic depth rate. act_layer: Activation layer. norm_layer: Normalization layer. """ super().__init__() self.dim = dim self.input_resolution = input_resolution ws, ss = self._calc_window_shift(window_size, shift_size) self.window_size: Tuple[int, int] = ws self.shift_size: Tuple[int, int] = ss self.window_area = self.window_size[0] * self.window_size[1] self.mlp_ratio = mlp_ratio self.norm1 = norm_layer(dim) self.attn = WindowAttention( dim, num_heads=num_heads, head_dim=head_dim, window_size=to_2tuple(self.window_size), qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() if any(self.shift_size): # calculate attention mask for SW-MSA H, W = self.input_resolution H = math.ceil(H / self.window_size[0]) * self.window_size[0] W = math.ceil(W / self.window_size[1]) * self.window_size[1] img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 cnt = 0 for h in ( slice(0, -self.window_size[0]), slice(-self.window_size[0], -self.shift_size[0]), slice(-self.shift_size[0], None)): for w in ( slice(0, -self.window_size[1]), slice(-self.window_size[1], -self.shift_size[1]), slice(-self.shift_size[1], None)): img_mask[:, h, w, :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_area) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None self.register_buffer("attn_mask", attn_mask, persistent=False) def _calc_window_shift(self, target_window_size, target_shift_size) -> Tuple[Tuple[int, int], Tuple[int, int]]: target_window_size = to_2tuple(target_window_size) target_shift_size = to_2tuple(target_shift_size) window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)] shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)] return tuple(window_size), tuple(shift_size) def _attn(self, x): B, H, W, C = x.shape # cyclic shift has_shift = any(self.shift_size) if has_shift: shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2)) else: shifted_x = x # pad for resolution not divisible by window size pad_h = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] pad_w = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] shifted_x = torch.nn.functional.pad(shifted_x, (0, 0, 0, pad_w, 0, pad_h)) Hp, Wp = H + pad_h, W + pad_w # partition windows x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C # W-MSA/SW-MSA attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size[0], 
self.window_size[1], C) shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C shifted_x = shifted_x[:, :H, :W, :].contiguous() # reverse cyclic shift if has_shift: x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2)) else: x = shifted_x return x def forward(self, x): B, H, W, C = x.shape x = x + self.drop_path1(self._attn(self.norm1(x))) x = x.reshape(B, -1, C) x = x + self.drop_path2(self.mlp(self.norm2(x))) x = x.reshape(B, H, W, C) return x class PatchMerging(nn.Module): """ Patch Merging Layer. """ def __init__( self, dim: int, out_dim: Optional[int] = None, norm_layer: Callable = nn.LayerNorm, ): """ Args: dim: Number of input channels. out_dim: Number of output channels (or 2 * dim if None) norm_layer: Normalization layer. """ super().__init__() self.dim = dim self.out_dim = out_dim or 2 * dim self.norm = norm_layer(4 * dim) self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False) def forward(self, x): B, H, W, C = x.shape _assert(H % 2 == 0, f"x height ({H}) is not even.") _assert(W % 2 == 0, f"x width ({W}) is not even.") x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) x = self.norm(x) x = self.reduction(x) return x class SwinTransformerStage(nn.Module): """ A basic Swin Transformer layer for one stage. """ def __init__( self, dim: int, out_dim: int, input_resolution: Tuple[int, int], depth: int, downsample: bool = True, num_heads: int = 4, head_dim: Optional[int] = None, window_size: _int_or_tuple_2_t = 7, mlp_ratio: float = 4., qkv_bias: bool = True, proj_drop: float = 0., attn_drop: float = 0., drop_path: Union[List[float], float] = 0., norm_layer: Callable = nn.LayerNorm, ): """ Args: dim: Number of input channels. input_resolution: Input resolution. depth: Number of blocks. downsample: Downsample layer at the end of the layer. num_heads: Number of attention heads. head_dim: Channels per head (dim // num_heads if not set) window_size: Local window size. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. proj_drop: Projection dropout rate. attn_drop: Attention dropout rate. drop_path: Stochastic depth rate. norm_layer: Normalization layer. 
""" super().__init__() self.dim = dim self.input_resolution = input_resolution self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution self.depth = depth self.grad_checkpointing = False window_size = to_2tuple(window_size) shift_size = tuple([w // 2 for w in window_size]) # patch merging layer if downsample: self.downsample = PatchMerging( dim=dim, out_dim=out_dim, norm_layer=norm_layer, ) else: assert dim == out_dim self.downsample = nn.Identity() # build blocks self.blocks = nn.Sequential(*[ SwinTransformerBlock( dim=out_dim, input_resolution=self.output_resolution, num_heads=num_heads, head_dim=head_dim, window_size=window_size, shift_size=0 if (i % 2 == 0) else shift_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer, ) for i in range(depth)]) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class SwinTransformer(nn.Module): """ Swin Transformer A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - https://arxiv.org/pdf/2103.14030 """ def __init__( self, img_size: _int_or_tuple_2_t = 224, patch_size: int = 4, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 96, depths: Tuple[int, ...] = (2, 2, 6, 2), num_heads: Tuple[int, ...] = (3, 6, 12, 24), head_dim: Optional[int] = None, window_size: _int_or_tuple_2_t = 7, mlp_ratio: float = 4., qkv_bias: bool = True, drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0.1, norm_layer: Union[str, Callable] = nn.LayerNorm, weight_init: str = '', **kwargs, ): """ Args: img_size: Input image size. patch_size: Patch size. in_chans: Number of input image channels. num_classes: Number of classes for classification head. embed_dim: Patch embedding dimension. depths: Depth of each Swin Transformer layer. num_heads: Number of attention heads in different layers. head_dim: Dimension of self-attention heads. window_size: Window size. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. drop_rate: Dropout rate. attn_drop_rate (float): Attention dropout rate. drop_path_rate (float): Stochastic depth rate. norm_layer (nn.Module): Normalization layer. 
""" super().__init__() assert global_pool in ('', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.output_fmt = 'NHWC' self.num_layers = len(depths) self.embed_dim = embed_dim self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.feature_info = [] if not isinstance(embed_dim, (tuple, list)): embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] # split image into non-overlapping patches self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim[0], norm_layer=norm_layer, output_fmt='NHWC', ) self.patch_grid = self.patch_embed.grid_size # build layers head_dim = to_ntuple(self.num_layers)(head_dim) if not isinstance(window_size, (list, tuple)): window_size = to_ntuple(self.num_layers)(window_size) elif len(window_size) == 2: window_size = (window_size,) * self.num_layers assert len(window_size) == self.num_layers mlp_ratio = to_ntuple(self.num_layers)(mlp_ratio) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] layers = [] in_dim = embed_dim[0] scale = 1 for i in range(self.num_layers): out_dim = embed_dim[i] layers += [SwinTransformerStage( dim=in_dim, out_dim=out_dim, input_resolution=( self.patch_grid[0] // scale, self.patch_grid[1] // scale ), depth=depths[i], downsample=i > 0, num_heads=num_heads[i], head_dim=head_dim[i], window_size=window_size[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, )] in_dim = out_dim if i > 0: scale *= 2 self.feature_info += [dict(num_chs=out_dim, reduction=4 * scale, module=f'layers.{i}')] self.layers = nn.Sequential(*layers) self.norm = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt, ) if weight_init != 'skip': self.init_weights(weight_init) @torch.jit.ignore def init_weights(self, mode=''): assert mode in ('jax', 'jax_nlhb', 'moco', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. 
named_apply(get_init_weights_vit(mode, head_bias=head_bias), self) @torch.jit.ignore def no_weight_decay(self): nwd = set() for n, _ in self.named_parameters(): if 'relative_position_bias_table' in n: nwd.add(n) return nwd @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^patch_embed', # stem and embed blocks=r'^layers\.(\d+)' if coarse else [ (r'^layers\.(\d+).downsample', (0,)), (r'^layers\.(\d+)\.\w+\.(\d+)', None), (r'^norm', (99999,)), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.layers: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.patch_embed(x) x = self.layers(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ convert patch embedding weight from manual patchify + linear proj to conv""" old_weights = True if 'head.fc.weight' in state_dict: old_weights = False import re out_dict = {} state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) for k, v in state_dict.items(): if any([n in k for n in ('relative_position_index', 'attn_mask')]): continue # skip buffers that should not be persistent if k.endswith('relative_position_bias_table'): m = model.get_submodule(k[:-29]) if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]: v = resize_rel_pos_bias_table( v, new_window_size=m.window_size, new_bias_shape=m.relative_position_bias_table.shape, ) if old_weights: k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_swin_transformer(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( SwinTransformer, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ 'swin_small_patch4_window7_224.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22kto1k_finetune.pth', ), 'swin_base_patch4_window7_224.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',), 'swin_base_patch4_window12_384.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'swin_large_patch4_window7_224.ms_in22k_ft_in1k': 
_cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',), 'swin_large_patch4_window12_384.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'swin_tiny_patch4_window7_224.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',), 'swin_small_patch4_window7_224.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',), 'swin_base_patch4_window7_224.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth',), 'swin_base_patch4_window12_384.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), # tiny 22k pretrain is worse than 1k, so moved after (untagged priority is based on order) 'swin_tiny_patch4_window7_224.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22kto1k_finetune.pth',), 'swin_tiny_patch4_window7_224.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_tiny_patch4_window7_224_22k.pth', num_classes=21841), 'swin_small_patch4_window7_224.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.8/swin_small_patch4_window7_224_22k.pth', num_classes=21841), 'swin_base_patch4_window7_224.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth', num_classes=21841), 'swin_base_patch4_window12_384.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841), 'swin_large_patch4_window7_224.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth', num_classes=21841), 'swin_large_patch4_window12_384.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21841), 'swin_s3_tiny_224.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_t-1d53f6a8.pth'), 'swin_s3_small_224.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_s-3bb4c69d.pth'), 'swin_s3_base_224.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/s3_b-a1e95db4.pth'), }) @register_model def swin_tiny_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: """ Swin-T @ 224x224, trained ImageNet-1k """ model_args = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer( 
'swin_tiny_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_small_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: """ Swin-S @ 224x224 """ model_args = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer( 'swin_small_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_base_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: """ Swin-B @ 224x224 """ model_args = dict(patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer( 'swin_base_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_base_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer: """ Swin-B @ 384x384 """ model_args = dict(patch_size=4, window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer( 'swin_base_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_large_patch4_window7_224(pretrained=False, **kwargs) -> SwinTransformer: """ Swin-L @ 224x224 """ model_args = dict(patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) return _create_swin_transformer( 'swin_large_patch4_window7_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_large_patch4_window12_384(pretrained=False, **kwargs) -> SwinTransformer: """ Swin-L @ 384x384 """ model_args = dict(patch_size=4, window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) return _create_swin_transformer( 'swin_large_patch4_window12_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_s3_tiny_224(pretrained=False, **kwargs) -> SwinTransformer: """ Swin-S3-T @ 224x224, https://arxiv.org/abs/2111.14725 """ model_args = dict( patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer('swin_s3_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_s3_small_224(pretrained=False, **kwargs) -> SwinTransformer: """ Swin-S3-S @ 224x224, https://arxiv.org/abs/2111.14725 """ model_args = dict( patch_size=4, window_size=(14, 14, 14, 7), embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer('swin_s3_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swin_s3_base_224(pretrained=False, **kwargs) -> SwinTransformer: """ Swin-S3-B @ 224x224, https://arxiv.org/abs/2111.14725 """ model_args = dict( patch_size=4, window_size=(7, 7, 14, 7), embed_dim=96, depths=(2, 2, 30, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer('swin_s3_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, { 'swin_base_patch4_window7_224_in22k': 'swin_base_patch4_window7_224.ms_in22k', 'swin_base_patch4_window12_384_in22k': 'swin_base_patch4_window12_384.ms_in22k', 'swin_large_patch4_window7_224_in22k': 'swin_large_patch4_window7_224.ms_in22k', 'swin_large_patch4_window12_384_in22k': 'swin_large_patch4_window12_384.ms_in22k', })
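
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swin_transformer.py): the
# window_partition / window_reverse helpers defined above are exact inverses
# whenever H and W are multiples of the window size, which is what makes the
# (shifted) window attention in SwinTransformerBlock lossless outside padding.
import torch

t = torch.randn(2, 56, 56, 96)             # (B, H, W, C), e.g. Swin-T stage 1 tokens
wins = window_partition(t, (7, 7))         # (2 * 8 * 8, 7, 7, 96) non-overlapping windows
t2 = window_reverse(wins, (7, 7), 56, 56)  # back to (2, 56, 56, 96)
assert torch.equal(t, t2)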
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/swin_transformer_v2.py
""" Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/abs/2111.09883 Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman """ # -------------------------------------------------------- # Swin Transformer V2 # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ze Liu # -------------------------------------------------------- import math from typing import Callable, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['SwinTransformerV2'] # model_registry will add each entrypoint fn to this _int_or_tuple_2_t = Union[int, Tuple[int, int]] def window_partition(x, window_size: Tuple[int, int]): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): """ Args: windows: (num_windows * B, window_size[0], window_size[1], C) window_size (Tuple[int, int]): Window size img_size (Tuple[int, int]): Image size Returns: x: (B, H, W, C) """ H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class WindowAttention(nn.Module): r""" Window based multi-head self attention (W-MSA) module with relative position bias. It supports both of shifted and non-shifted window. Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0 pretrained_window_size (tuple[int]): The height and width of the window in pre-training. 
""" def __init__( self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0., pretrained_window_size=[0, 0], ): super().__init__() self.dim = dim self.window_size = window_size # Wh, Ww self.pretrained_window_size = pretrained_window_size self.num_heads = num_heads self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1)))) # mlp to generate continuous relative position bias self.cpb_mlp = nn.Sequential( nn.Linear(2, 512, bias=True), nn.ReLU(inplace=True), nn.Linear(512, num_heads, bias=False) ) # get relative_coords_table relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32) relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32) relative_coords_table = torch.stack(torch.meshgrid([ relative_coords_h, relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 if pretrained_window_size[0] > 0: relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1) relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1) else: relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) relative_coords_table *= 8 # normalize to -8, 8 relative_coords_table = torch.sign(relative_coords_table) * torch.log2( torch.abs(relative_coords_table) + 1.0) / math.log2(8) self.register_buffer("relative_coords_table", relative_coords_table, persistent=False) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww self.register_buffer("relative_position_index", relative_position_index, persistent=False) self.qkv = nn.Linear(dim, dim * 3, bias=False) if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(dim)) self.register_buffer('k_bias', torch.zeros(dim), persistent=False) self.v_bias = nn.Parameter(torch.zeros(dim)) else: self.q_bias = None self.k_bias = None self.v_bias = None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.softmax = nn.Softmax(dim=-1) def forward(self, x, mask: Optional[torch.Tensor] = None): """ Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None """ B_, N, C = x.shape qkv_bias = None if self.q_bias is not None: qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) # cosine attention attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) logit_scale = torch.clamp(self.logit_scale, max=math.log(1. 
/ 0.01)).exp() attn = attn * logit_scale relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww relative_position_bias = 16 * torch.sigmoid(relative_position_bias) attn = attn + relative_position_bias.unsqueeze(0) if mask is not None: num_win = mask.shape[0] attn = attn.view(-1, num_win, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, N, N) attn = self.softmax(attn) else: attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B_, N, C) x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerV2Block(nn.Module): """ Swin Transformer Block. """ def __init__( self, dim, input_resolution, num_heads, window_size=7, shift_size=0, mlp_ratio=4., qkv_bias=True, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, pretrained_window_size=0, ): """ Args: dim: Number of input channels. input_resolution: Input resolution. num_heads: Number of attention heads. window_size: Window size. shift_size: Shift size for SW-MSA. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. proj_drop: Dropout rate. attn_drop: Attention dropout rate. drop_path: Stochastic depth rate. act_layer: Activation layer. norm_layer: Normalization layer. pretrained_window_size: Window size in pretraining. """ super().__init__() self.dim = dim self.input_resolution = to_2tuple(input_resolution) self.num_heads = num_heads ws, ss = self._calc_window_shift(window_size, shift_size) self.window_size: Tuple[int, int] = ws self.shift_size: Tuple[int, int] = ss self.window_area = self.window_size[0] * self.window_size[1] self.mlp_ratio = mlp_ratio self.attn = WindowAttention( dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, pretrained_window_size=to_2tuple(pretrained_window_size), ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() if any(self.shift_size): # calculate attention mask for SW-MSA H, W = self.input_resolution img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 cnt = 0 for h in ( slice(0, -self.window_size[0]), slice(-self.window_size[0], -self.shift_size[0]), slice(-self.shift_size[0], None)): for w in ( slice(0, -self.window_size[1]), slice(-self.window_size[1], -self.shift_size[1]), slice(-self.shift_size[1], None)): img_mask[:, h, w, :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_area) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None self.register_buffer("attn_mask", attn_mask, persistent=False) def _calc_window_shift(self, target_window_size, target_shift_size) -> Tuple[Tuple[int, int], Tuple[int, int]]: target_window_size = to_2tuple(target_window_size) target_shift_size = to_2tuple(target_shift_size) window_size = [r if r <= w else w for r, w in zip(self.input_resolution, target_window_size)] shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)] return tuple(window_size), tuple(shift_size) def _attn(self, x): B, H, W, C = x.shape # cyclic shift has_shift = any(self.shift_size) if has_shift: shifted_x = torch.roll(x, shifts=(-self.shift_size[0], -self.shift_size[1]), dims=(1, 2)) else: shifted_x = x # partition windows x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C x_windows = x_windows.view(-1, self.window_area, C) # nW*B, window_size*window_size, C # W-MSA/SW-MSA attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) shifted_x = window_reverse(attn_windows, self.window_size, self.input_resolution) # B H' W' C # reverse cyclic shift if has_shift: x = torch.roll(shifted_x, shifts=self.shift_size, dims=(1, 2)) else: x = shifted_x return x def forward(self, x): B, H, W, C = x.shape x = x + self.drop_path1(self.norm1(self._attn(x))) x = x.reshape(B, -1, C) x = x + self.drop_path2(self.norm2(self.mlp(x))) x = x.reshape(B, H, W, C) return x class PatchMerging(nn.Module): """ Patch Merging Layer. """ def __init__(self, dim, out_dim=None, norm_layer=nn.LayerNorm): """ Args: dim (int): Number of input channels. out_dim (int): Number of output channels (or 2 * dim if None) norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm """ super().__init__() self.dim = dim self.out_dim = out_dim or 2 * dim self.reduction = nn.Linear(4 * dim, self.out_dim, bias=False) self.norm = norm_layer(self.out_dim) def forward(self, x): B, H, W, C = x.shape _assert(H % 2 == 0, f"x height ({H}) is not even.") _assert(W % 2 == 0, f"x width ({W}) is not even.") x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) x = self.reduction(x) x = self.norm(x) return x class SwinTransformerV2Stage(nn.Module): """ A Swin Transformer V2 Stage. """ def __init__( self, dim, out_dim, input_resolution, depth, num_heads, window_size, downsample=False, mlp_ratio=4., qkv_bias=True, proj_drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, pretrained_window_size=0, output_nchw=False, ): """ Args: dim: Number of input channels. input_resolution: Input resolution. depth: Number of blocks. 
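out_dim: Number of output channels.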
num_heads: Number of attention heads. window_size: Local window size. downsample: Use downsample layer at start of the block. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. proj_drop: Projection dropout rate attn_drop: Attention dropout rate. drop_path: Stochastic depth rate. norm_layer: Normalization layer. pretrained_window_size: Local window size in pretraining. output_nchw: Output tensors on NCHW format instead of NHWC. """ super().__init__() self.dim = dim self.input_resolution = input_resolution self.output_resolution = tuple(i // 2 for i in input_resolution) if downsample else input_resolution self.depth = depth self.output_nchw = output_nchw self.grad_checkpointing = False window_size = to_2tuple(window_size) shift_size = tuple([w // 2 for w in window_size]) # patch merging / downsample layer if downsample: self.downsample = PatchMerging(dim=dim, out_dim=out_dim, norm_layer=norm_layer) else: assert dim == out_dim self.downsample = nn.Identity() # build blocks self.blocks = nn.ModuleList([ SwinTransformerV2Block( dim=out_dim, input_resolution=self.output_resolution, num_heads=num_heads, window_size=window_size, shift_size=0 if (i % 2 == 0) else shift_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer, pretrained_window_size=pretrained_window_size, ) for i in range(depth)]) def forward(self, x): x = self.downsample(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint.checkpoint(blk, x) else: x = blk(x) return x def _init_respostnorm(self): for blk in self.blocks: nn.init.constant_(blk.norm1.bias, 0) nn.init.constant_(blk.norm1.weight, 0) nn.init.constant_(blk.norm2.bias, 0) nn.init.constant_(blk.norm2.weight, 0) class SwinTransformerV2(nn.Module): """ Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/abs/2111.09883 """ def __init__( self, img_size: _int_or_tuple_2_t = 224, patch_size: int = 4, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 96, depths: Tuple[int, ...] = (2, 2, 6, 2), num_heads: Tuple[int, ...] = (3, 6, 12, 24), window_size: _int_or_tuple_2_t = 7, mlp_ratio: float = 4., qkv_bias: bool = True, drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0.1, norm_layer: Callable = nn.LayerNorm, pretrained_window_sizes: Tuple[int, ...] = (0, 0, 0, 0), **kwargs, ): """ Args: img_size: Input image size. patch_size: Patch size. in_chans: Number of input image channels. num_classes: Number of classes for classification head. embed_dim: Patch embedding dimension. depths: Depth of each Swin Transformer stage (layer). num_heads: Number of attention heads in different layers. window_size: Window size. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: If True, add a learnable bias to query, key, value. drop_rate: Head dropout rate. proj_drop_rate: Projection dropout rate. attn_drop_rate: Attention dropout rate. drop_path_rate: Stochastic depth rate. norm_layer: Normalization layer. patch_norm: If True, add normalization after patch embedding. pretrained_window_sizes: Pretrained window sizes of each layer. output_fmt: Output tensor format if not None, otherwise output 'NHWC' by default. 
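global_pool: Global pooling type ('avg' or '').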
""" super().__init__() self.num_classes = num_classes assert global_pool in ('', 'avg') self.global_pool = global_pool self.output_fmt = 'NHWC' self.num_layers = len(depths) self.embed_dim = embed_dim self.num_features = int(embed_dim * 2 ** (self.num_layers - 1)) self.feature_info = [] if not isinstance(embed_dim, (tuple, list)): embed_dim = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] # split image into non-overlapping patches self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim[0], norm_layer=norm_layer, output_fmt='NHWC', ) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] layers = [] in_dim = embed_dim[0] scale = 1 for i in range(self.num_layers): out_dim = embed_dim[i] layers += [SwinTransformerV2Stage( dim=in_dim, out_dim=out_dim, input_resolution=( self.patch_embed.grid_size[0] // scale, self.patch_embed.grid_size[1] // scale), depth=depths[i], downsample=i > 0, num_heads=num_heads[i], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, pretrained_window_size=pretrained_window_sizes[i], )] in_dim = out_dim if i > 0: scale *= 2 self.feature_info += [dict(num_chs=out_dim, reduction=4 * scale, module=f'layers.{i}')] self.layers = nn.Sequential(*layers) self.norm = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt, ) self.apply(self._init_weights) for bly in self.layers: bly._init_respostnorm() def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): nod = set() for n, m in self.named_modules(): if any([kw in n for kw in ("cpb_mlp", "logit_scale")]): nod.add(n) return nod @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^absolute_pos_embed|patch_embed', # stem and embed blocks=r'^layers\.(\d+)' if coarse else [ (r'^layers\.(\d+).downsample', (0,)), (r'^layers\.(\d+)\.\w+\.(\d+)', None), (r'^norm', (99999,)), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.layers: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.patch_embed(x) x = self.layers(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) native_checkpoint = 'head.fc.weight' in state_dict out_dict = {} import re for k, v in state_dict.items(): if any([n in k for n in ('relative_position_index', 'relative_coords_table', 'attn_mask')]): continue # skip buffers that should not be persistent if not native_checkpoint: # skip layer remapping for updated checkpoints k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def 
_create_swin_transformer_v2(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( SwinTransformerV2, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth', ), 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth', ), 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, ), 'swinv2_tiny_window8_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth', ), 'swinv2_tiny_window16_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth', ), 'swinv2_small_window8_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth', ), 'swinv2_small_window16_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth', ), 'swinv2_base_window8_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth', ), 'swinv2_base_window16_256.ms_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth', ), 'swinv2_base_window12_192.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth', num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6) ), 'swinv2_large_window12_192.ms_in22k': _cfg( hf_hub_id='timm/', url='https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth', num_classes=21841, input_size=(3, 192, 192), pool_size=(6, 6) ), }) @register_model def swinv2_tiny_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2( 
'swinv2_tiny_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_tiny_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2( 'swinv2_tiny_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_small_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=16, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2( 'swinv2_small_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_small_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=8, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24)) return _create_swin_transformer_v2( 'swinv2_small_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window16_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2( 'swinv2_base_window16_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window8_256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=8, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2( 'swinv2_base_window8_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=12, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32)) return _create_swin_transformer_v2( 'swinv2_base_window12_192', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict( window_size=16, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2( 'swinv2_base_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_base_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict( window_size=24, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2( 'swinv2_base_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_large_window12_192(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict(window_size=12, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48)) return _create_swin_transformer_v2( 'swinv2_large_window12_192', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_large_window12to16_192to256(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = dict( window_size=16, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2( 'swinv2_large_window12to16_192to256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_large_window12to24_192to384(pretrained=False, **kwargs) -> SwinTransformerV2: """ """ model_args = 
dict( window_size=24, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), pretrained_window_sizes=(12, 12, 12, 6)) return _create_swin_transformer_v2( 'swinv2_large_window12to24_192to384', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, { 'swinv2_base_window12_192_22k': 'swinv2_base_window12_192.ms_in22k', 'swinv2_base_window12to16_192to256_22kft1k': 'swinv2_base_window12to16_192to256.ms_in22k_ft_in1k', 'swinv2_base_window12to24_192to384_22kft1k': 'swinv2_base_window12to24_192to384.ms_in22k_ft_in1k', 'swinv2_large_window12_192_22k': 'swinv2_large_window12_192.ms_in22k', 'swinv2_large_window12to16_192to256_22kft1k': 'swinv2_large_window12to16_192to256.ms_in22k_ft_in1k', 'swinv2_large_window12to24_192to384_22kft1k': 'swinv2_large_window12to24_192to384.ms_in22k_ft_in1k', })
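# Editorial sketch, not part of the original timm file: a minimal smoke test
# for the SwinTransformerV2 class above, using only symbols defined in this
# module. Constructor args mirror the 'swinv2_tiny_window8_256' entrypoint.
if __name__ == '__main__':
    _model = SwinTransformerV2(
        img_size=256, window_size=8, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
    _out = _model(torch.randn(1, 3, 256, 256))
    print(_out.shape)  # expected: torch.Size([1, 1000])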
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/swin_transformer_v2_cr.py
""" Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/pdf/2111.09883 Code adapted from https://github.com/ChristophReich1996/Swin-Transformer-V2, original copyright/license info below This implementation is experimental and subject to change in manners that will break weight compat: * Size of the pos embed MLP are not spelled out in paper in terms of dim, fixed for all models? vary with num_heads? * currently dim is fixed, I feel it may make sense to scale with num_heads (dim per head) * The specifics of the memory saving 'sequential attention' are not detailed, Christoph Reich has an impl at GitHub link above. It needs further investigation as throughput vs mem tradeoff doesn't appear beneficial. * num_heads per stage is not detailed for Huge and Giant model variants * 'Giant' is 3B params in paper but ~2.6B here despite matching paper dim + block counts * experiments are ongoing wrt to 'main branch' norm layer use and weight init scheme Noteworthy additions over official Swin v1: * MLP relative position embedding is looking promising and adapts to different image/window sizes * This impl has been designed to allow easy change of image size with matching window size changes * Non-square image size and window size are supported Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman """ # -------------------------------------------------------- # Swin Transformer V2 reimplementation # Copyright (c) 2021 Christoph Reich # Licensed under The MIT License [see LICENSE for details] # Written by Christoph Reich # -------------------------------------------------------- import logging import math from typing import Tuple, Optional, List, Union, Any, Type import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, ClassifierHead, to_2tuple, _assert from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import named_apply from ._registry import generate_default_cfgs, register_model __all__ = ['SwinTransformerV2Cr'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) def bchw_to_bhwc(x: torch.Tensor) -> torch.Tensor: """Permutes a tensor from the shape (B, C, H, W) to (B, H, W, C). """ return x.permute(0, 2, 3, 1) def bhwc_to_bchw(x: torch.Tensor) -> torch.Tensor: """Permutes a tensor from the shape (B, H, W, C) to (B, C, H, W). 
""" return x.permute(0, 3, 1, 2) def window_partition(x, window_size: Tuple[int, int]): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C) return windows @register_notrace_function # reason: int argument is a Proxy def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]): """ Args: windows: (num_windows * B, window_size[0], window_size[1], C) window_size (Tuple[int, int]): Window size img_size (Tuple[int, int]): Image size Returns: x: (B, H, W, C) """ H, W = img_size C = windows.shape[-1] x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C) return x class WindowMultiHeadAttention(nn.Module): r"""This class implements window-based Multi-Head-Attention with log-spaced continuous position bias. Args: dim (int): Number of input features window_size (int): Window size num_heads (int): Number of attention heads drop_attn (float): Dropout rate of attention map drop_proj (float): Dropout rate after projection meta_hidden_dim (int): Number of hidden features in the two layer MLP meta network sequential_attn (bool): If true sequential self-attention is performed """ def __init__( self, dim: int, num_heads: int, window_size: Tuple[int, int], drop_attn: float = 0.0, drop_proj: float = 0.0, meta_hidden_dim: int = 384, # FIXME what's the optimal value? sequential_attn: bool = False, ) -> None: super(WindowMultiHeadAttention, self).__init__() assert dim % num_heads == 0, \ "The number of input features (in_features) are not divisible by the number of heads (num_heads)." self.in_features: int = dim self.window_size: Tuple[int, int] = window_size self.num_heads: int = num_heads self.sequential_attn: bool = sequential_attn self.qkv = nn.Linear(in_features=dim, out_features=dim * 3, bias=True) self.attn_drop = nn.Dropout(drop_attn) self.proj = nn.Linear(in_features=dim, out_features=dim, bias=True) self.proj_drop = nn.Dropout(drop_proj) # meta network for positional encodings self.meta_mlp = Mlp( 2, # x, y hidden_features=meta_hidden_dim, out_features=num_heads, act_layer=nn.ReLU, drop=(0.125, 0.) # FIXME should there be stochasticity, appears to 'overfit' without? 
) # NOTE old checkpoints used inverse of logit_scale ('tau') following the paper, see conversion fn self.logit_scale = nn.Parameter(torch.log(10 * torch.ones(num_heads))) self._make_pair_wise_relative_positions() def _make_pair_wise_relative_positions(self) -> None: """Method initializes the pair-wise relative positions to compute the positional biases.""" device = self.logit_scale.device coordinates = torch.stack(torch.meshgrid([ torch.arange(self.window_size[0], device=device), torch.arange(self.window_size[1], device=device)]), dim=0).flatten(1) relative_coordinates = coordinates[:, :, None] - coordinates[:, None, :] relative_coordinates = relative_coordinates.permute(1, 2, 0).reshape(-1, 2).float() relative_coordinates_log = torch.sign(relative_coordinates) * torch.log( 1.0 + relative_coordinates.abs()) self.register_buffer("relative_coordinates_log", relative_coordinates_log, persistent=False) def update_input_size(self, new_window_size: int, **kwargs: Any) -> None: """Method updates the window size and so the pair-wise relative positions Args: new_window_size (int): New window size kwargs (Any): Unused """ # Set new window size and new pair-wise relative positions self.window_size: int = new_window_size self._make_pair_wise_relative_positions() def _relative_positional_encodings(self) -> torch.Tensor: """Method computes the relative positional encodings Returns: relative_position_bias (torch.Tensor): Relative positional encodings (1, number of heads, window size ** 2, window size ** 2) """ window_area = self.window_size[0] * self.window_size[1] relative_position_bias = self.meta_mlp(self.relative_coordinates_log) relative_position_bias = relative_position_bias.transpose(1, 0).reshape( self.num_heads, window_area, window_area ) relative_position_bias = relative_position_bias.unsqueeze(0) return relative_position_bias def forward(self, x: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor: """ Forward pass. Args: x (torch.Tensor): Input tensor of the shape (B * windows, N, C) mask (Optional[torch.Tensor]): Attention mask for the shift case Returns: Output tensor of the shape [B * windows, N, C] """ Bw, L, C = x.shape qkv = self.qkv(x).view(Bw, L, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) query, key, value = qkv.unbind(0) # compute attention map with scaled cosine attention attn = (F.normalize(query, dim=-1) @ F.normalize(key, dim=-1).transpose(-2, -1)) logit_scale = torch.clamp(self.logit_scale.reshape(1, self.num_heads, 1, 1), max=math.log(1. / 0.01)).exp() attn = attn * logit_scale attn = attn + self._relative_positional_encodings() if mask is not None: # Apply mask if utilized num_win: int = mask.shape[0] attn = attn.view(Bw // num_win, num_win, self.num_heads, L, L) attn = attn + mask.unsqueeze(1).unsqueeze(0) attn = attn.view(-1, self.num_heads, L, L) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ value).transpose(1, 2).reshape(Bw, L, -1) x = self.proj(x) x = self.proj_drop(x) return x class SwinTransformerV2CrBlock(nn.Module): r"""This class implements the Swin transformer block. 
Args: dim (int): Number of input channels num_heads (int): Number of attention heads to be utilized feat_size (Tuple[int, int]): Input resolution window_size (Tuple[int, int]): Window size to be utilized shift_size (int): Shifting size to be used mlp_ratio (int): Ratio of the hidden dimension in the FFN to the input channels proj_drop (float): Dropout in input mapping drop_attn (float): Dropout rate of attention map drop_path (float): Dropout in main path extra_norm (bool): Insert extra norm on 'main' branch if True sequential_attn (bool): If true sequential self-attention is performed norm_layer (Type[nn.Module]): Type of normalization layer to be utilized """ def __init__( self, dim: int, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], shift_size: Tuple[int, int] = (0, 0), mlp_ratio: float = 4.0, init_values: Optional[float] = 0, proj_drop: float = 0.0, drop_attn: float = 0.0, drop_path: float = 0.0, extra_norm: bool = False, sequential_attn: bool = False, norm_layer: Type[nn.Module] = nn.LayerNorm, ) -> None: super(SwinTransformerV2CrBlock, self).__init__() self.dim: int = dim self.feat_size: Tuple[int, int] = feat_size self.target_shift_size: Tuple[int, int] = to_2tuple(shift_size) self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(window_size)) self.window_area = self.window_size[0] * self.window_size[1] self.init_values: Optional[float] = init_values # attn branch self.attn = WindowMultiHeadAttention( dim=dim, num_heads=num_heads, window_size=self.window_size, drop_attn=drop_attn, drop_proj=proj_drop, sequential_attn=sequential_attn, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() # mlp branch self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), drop=proj_drop, out_features=dim, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_prob=drop_path) if drop_path > 0.0 else nn.Identity() # Extra main branch norm layer mentioned for Huge/Giant models in V2 paper. # Also being used as final network norm and optional stage ending norm while still in a C-last format. 
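# NOTE: whether a given block receives this extra norm is decided by the stage wrapper (see _extra_norm() in SwinTransformerV2CrStage below); for all other blocks norm3 is an nn.Identity.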
self.norm3 = norm_layer(dim) if extra_norm else nn.Identity() self._make_attention_mask() self.init_weights() def _calc_window_shift(self, target_window_size): window_size = [f if f <= w else w for f, w in zip(self.feat_size, target_window_size)] shift_size = [0 if f <= w else s for f, w, s in zip(self.feat_size, window_size, self.target_shift_size)] return tuple(window_size), tuple(shift_size) def _make_attention_mask(self) -> None: """Method generates the attention mask used in shift case.""" # Make masks for shift case if any(self.shift_size): # calculate attention mask for SW-MSA H, W = self.feat_size img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 cnt = 0 for h in ( slice(0, -self.window_size[0]), slice(-self.window_size[0], -self.shift_size[0]), slice(-self.shift_size[0], None)): for w in ( slice(0, -self.window_size[1]), slice(-self.window_size[1], -self.shift_size[1]), slice(-self.shift_size[1], None)): img_mask[:, h, w, :] = cnt cnt += 1 mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1 mask_windows = mask_windows.view(-1, self.window_area) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None self.register_buffer("attn_mask", attn_mask, persistent=False) def init_weights(self): # extra, module specific weight init if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def update_input_size(self, new_window_size: Tuple[int, int], new_feat_size: Tuple[int, int]) -> None: """Method updates the image resolution to be processed and window size and so the pair-wise relative positions. Args: new_window_size (int): New window size new_feat_size (Tuple[int, int]): New input resolution """ # Update input resolution self.feat_size: Tuple[int, int] = new_feat_size self.window_size, self.shift_size = self._calc_window_shift(to_2tuple(new_window_size)) self.window_area = self.window_size[0] * self.window_size[1] self.attn.update_input_size(new_window_size=self.window_size) self._make_attention_mask() def _shifted_window_attn(self, x): B, H, W, C = x.shape # cyclic shift sh, sw = self.shift_size do_shift: bool = any(self.shift_size) if do_shift: # FIXME PyTorch XLA needs cat impl, roll not lowered # x = torch.cat([x[:, sh:], x[:, :sh]], dim=1) # x = torch.cat([x[:, :, sw:], x[:, :, :sw]], dim=2) x = torch.roll(x, shifts=(-sh, -sw), dims=(1, 2)) # partition windows x_windows = window_partition(x, self.window_size) # num_windows * B, window_size, window_size, C x_windows = x_windows.view(-1, self.window_size[0] * self.window_size[1], C) # W-MSA/SW-MSA attn_windows = self.attn(x_windows, mask=self.attn_mask) # num_windows * B, window_size * window_size, C # merge windows attn_windows = attn_windows.view(-1, self.window_size[0], self.window_size[1], C) x = window_reverse(attn_windows, self.window_size, self.feat_size) # B H' W' C # reverse cyclic shift if do_shift: # FIXME PyTorch XLA needs cat impl, roll not lowered # x = torch.cat([x[:, -sh:], x[:, :-sh]], dim=1) # x = torch.cat([x[:, :, -sw:], x[:, :, :-sw]], dim=2) x = torch.roll(x, shifts=(sh, sw), dims=(1, 2)) return x def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. 
Args: x (torch.Tensor): Input tensor of the shape [B, H, W, C] Returns: output (torch.Tensor): Output tensor of the shape [B, H, W, C] """ # post-norm branches (op -> norm -> drop) x = x + self.drop_path1(self.norm1(self._shifted_window_attn(x))) B, H, W, C = x.shape x = x.reshape(B, -1, C) x = x + self.drop_path2(self.norm2(self.mlp(x))) x = self.norm3(x) # main-branch norm enabled for some blocks / stages (every 6 for Huge/Giant) x = x.reshape(B, H, W, C) return x class PatchMerging(nn.Module): """ This class implements patch merging: a normalization followed by a linear reduction over each 2x2 spatial neighborhood, halving resolution and doubling channels. Args: dim (int): Number of input channels norm_layer (Type[nn.Module]): Type of normalization layer to be utilized. """ def __init__(self, dim: int, norm_layer: Type[nn.Module] = nn.LayerNorm) -> None: super(PatchMerging, self).__init__() self.norm = norm_layer(4 * dim) self.reduction = nn.Linear(in_features=4 * dim, out_features=2 * dim, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Forward pass. Args: x (torch.Tensor): Input tensor of the shape [B, H, W, C] Returns: output (torch.Tensor): Output tensor of the shape [B, H // 2, W // 2, 2 * C] """ B, H, W, C = x.shape x = x.reshape(B, H // 2, 2, W // 2, 2, C).permute(0, 1, 3, 4, 2, 5).flatten(3) x = self.norm(x) x = self.reduction(x) return x class PatchEmbed(nn.Module): """ 2D Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x): B, C, H, W = x.shape _assert(H == self.img_size[0], f"Input image height ({H}) doesn't match model ({self.img_size[0]}).") _assert(W == self.img_size[1], f"Input image width ({W}) doesn't match model ({self.img_size[1]}).") x = self.proj(x) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x class SwinTransformerV2CrStage(nn.Module): r"""This class implements a stage of the Swin transformer including multiple layers. Args: embed_dim (int): Number of input channels depth (int): Depth of the stage (number of layers) downscale (bool): If True, input is downsampled (see Fig. 3 of the V1 paper) feat_size (Tuple[int, int]): input feature map size (H, W) num_heads (int): Number of attention heads to be utilized window_size (Tuple[int, int]): Window size to be utilized mlp_ratio (float): Ratio of the hidden dimension in the FFN to the input channels proj_drop (float): Dropout in input mapping drop_attn (float): Dropout rate of attention map drop_path (float): Stochastic depth rate in main path norm_layer (Type[nn.Module]): Type of normalization layer to be utilized.
Default: nn.LayerNorm extra_norm_period (int): Insert extra norm layer on main branch every N (period) blocks extra_norm_stage (bool): End each stage with an extra norm layer in main branch sequential_attn (bool): If true sequential self-attention is performed """ def __init__( self, embed_dim: int, depth: int, downscale: bool, num_heads: int, feat_size: Tuple[int, int], window_size: Tuple[int, int], mlp_ratio: float = 4.0, init_values: Optional[float] = 0.0, proj_drop: float = 0.0, drop_attn: float = 0.0, drop_path: Union[List[float], float] = 0.0, norm_layer: Type[nn.Module] = nn.LayerNorm, extra_norm_period: int = 0, extra_norm_stage: bool = False, sequential_attn: bool = False, ) -> None: super(SwinTransformerV2CrStage, self).__init__() self.downscale: bool = downscale self.grad_checkpointing: bool = False self.feat_size: Tuple[int, int] = (feat_size[0] // 2, feat_size[1] // 2) if downscale else feat_size if downscale: self.downsample = PatchMerging(embed_dim, norm_layer=norm_layer) embed_dim = embed_dim * 2 else: self.downsample = nn.Identity() def _extra_norm(index): i = index + 1 if extra_norm_period and i % extra_norm_period == 0: return True return i == depth if extra_norm_stage else False self.blocks = nn.Sequential(*[ SwinTransformerV2CrBlock( dim=embed_dim, num_heads=num_heads, feat_size=self.feat_size, window_size=window_size, shift_size=tuple([0 if ((index % 2) == 0) else w // 2 for w in window_size]), mlp_ratio=mlp_ratio, init_values=init_values, proj_drop=proj_drop, drop_attn=drop_attn, drop_path=drop_path[index] if isinstance(drop_path, list) else drop_path, extra_norm=_extra_norm(index), sequential_attn=sequential_attn, norm_layer=norm_layer, ) for index in range(depth)] ) def update_input_size(self, new_window_size: int, new_feat_size: Tuple[int, int]) -> None: """Method updates the resolution to utilize and the window size and so the pair-wise relative positions. Args: new_window_size (int): New window size new_feat_size (Tuple[int, int]): New input resolution """ self.feat_size: Tuple[int, int] = ( (new_feat_size[0] // 2, new_feat_size[1] // 2) if self.downscale else new_feat_size ) for block in self.blocks: block.update_input_size(new_window_size=new_window_size, new_feat_size=self.feat_size) def forward(self, x: torch.Tensor) -> torch.Tensor: """Forward pass. Args: x (torch.Tensor): Input tensor of the shape [B, C, H, W] or [B, L, C] Returns: output (torch.Tensor): Output tensor of the shape [B, 2 * C, H // 2, W // 2] """ x = bchw_to_bhwc(x) x = self.downsample(x) for block in self.blocks: # Perform checkpointing if utilized if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint.checkpoint(block, x) else: x = block(x) x = bhwc_to_bchw(x) return x class SwinTransformerV2Cr(nn.Module): r""" Swin Transformer V2 A PyTorch impl of : `Swin Transformer V2: Scaling Up Capacity and Resolution` - https://arxiv.org/pdf/2111.09883 Args: img_size: Input resolution. window_size: Window size. If None, img_size // window_div img_window_ratio: Window size to image size ratio. patch_size: Patch size. in_chans: Number of input channels. depths: Depth of the stage (number of layers). num_heads: Number of attention heads to be utilized. embed_dim: Patch embedding dimension. num_classes: Number of output classes. mlp_ratio: Ratio of the hidden dimension in the FFN to the input channels. drop_rate: Dropout rate. proj_drop_rate: Projection dropout rate. attn_drop_rate: Dropout rate of attention map. drop_path_rate: Stochastic depth rate. 
norm_layer: Type of normalization layer to be utilized. extra_norm_period: Insert extra norm layer on main branch every N (period) blocks in stage extra_norm_stage: End each stage with an extra norm layer in main branch sequential_attn: If true sequential self-attention is performed. """ def __init__( self, img_size: Tuple[int, int] = (224, 224), patch_size: int = 4, window_size: Optional[int] = None, img_window_ratio: int = 32, in_chans: int = 3, num_classes: int = 1000, embed_dim: int = 96, depths: Tuple[int, ...] = (2, 2, 6, 2), num_heads: Tuple[int, ...] = (3, 6, 12, 24), mlp_ratio: float = 4.0, init_values: Optional[float] = 0., drop_rate: float = 0.0, proj_drop_rate: float = 0.0, attn_drop_rate: float = 0.0, drop_path_rate: float = 0.0, norm_layer: Type[nn.Module] = nn.LayerNorm, extra_norm_period: int = 0, extra_norm_stage: bool = False, sequential_attn: bool = False, global_pool: str = 'avg', weight_init='skip', **kwargs: Any ) -> None: super(SwinTransformerV2Cr, self).__init__() img_size = to_2tuple(img_size) window_size = tuple([ s // img_window_ratio for s in img_size]) if window_size is None else to_2tuple(window_size) self.num_classes: int = num_classes self.patch_size: int = patch_size self.img_size: Tuple[int, int] = img_size self.window_size: int = window_size self.num_features: int = int(embed_dim * 2 ** (len(depths) - 1)) self.feature_info = [] self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer, ) patch_grid_size: Tuple[int, int] = self.patch_embed.grid_size dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] stages = [] in_dim = embed_dim in_scale = 1 for stage_idx, (depth, num_heads) in enumerate(zip(depths, num_heads)): stages += [SwinTransformerV2CrStage( embed_dim=in_dim, depth=depth, downscale=stage_idx != 0, feat_size=( patch_grid_size[0] // in_scale, patch_grid_size[1] // in_scale ), num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, init_values=init_values, proj_drop=proj_drop_rate, drop_attn=attn_drop_rate, drop_path=dpr[stage_idx], extra_norm_period=extra_norm_period, extra_norm_stage=extra_norm_stage or (stage_idx + 1) == len(depths), # last stage ends w/ norm sequential_attn=sequential_attn, norm_layer=norm_layer, )] if stage_idx != 0: in_dim *= 2 in_scale *= 2 self.feature_info += [dict(num_chs=in_dim, reduction=4 * in_scale, module=f'stages.{stage_idx}')] self.stages = nn.Sequential(*stages) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) # current weight init skips custom init and uses pytorch layer defaults, seems to work well # FIXME more experiments needed if weight_init != 'skip': named_apply(init_weights, self) def update_input_size( self, new_img_size: Optional[Tuple[int, int]] = None, new_window_size: Optional[int] = None, img_window_ratio: int = 32, ) -> None: """Method updates the image resolution to be processed and window size and so the pair-wise relative positions. 
Args: new_window_size (Optional[int]): New window size, if None based on new_img_size // img_window_ratio new_img_size (Optional[Tuple[int, int]]): New input resolution, if None current resolution is used img_window_ratio (int): Divisor for calculating window size from image size """ # Check parameters if new_img_size is None: new_img_size = self.img_size else: new_img_size = to_2tuple(new_img_size) if new_window_size is None: new_window_size = tuple([s // img_window_ratio for s in new_img_size]) # Compute new patch resolution & update resolution of each stage new_patch_grid_size = (new_img_size[0] // self.patch_size, new_img_size[1] // self.patch_size) for index, stage in enumerate(self.stages): stage_scale = 2 ** max(index - 1, 0) stage.update_input_size( new_window_size=new_window_size, new_feat_size=(new_patch_grid_size[0] // stage_scale, new_patch_grid_size[1] // stage_scale), ) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^patch_embed', # stem and embed blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+).downsample', (0,)), (r'^stages\.(\d+)\.\w+\.(\d+)', None), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore() def get_classifier(self) -> nn.Module: """Method returns the classification head of the model. Returns: head (nn.Module): Current classification head """ return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None) -> None: """Method resets the classification head Args: num_classes (int): Number of classes to be predicted global_pool (str): If not None, new global pooling type for the classifier head """ self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.patch_embed(x) x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def init_weights(module: nn.Module, name: str = ''): # FIXME WIP determining if there's a better weight init if isinstance(module, nn.Linear): if 'qkv' in name: # treat the weights of Q, K, V separately val = math.sqrt(6.
/ float(module.weight.shape[0] // 3 + module.weight.shape[1])) nn.init.uniform_(module.weight, -val, val) elif 'head' in name: nn.init.zeros_(module.weight) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif hasattr(module, 'init_weights'): module.init_weights() def checkpoint_filter_fn(state_dict, model): """ convert patch embedding weight from manual patchify + linear proj to conv""" state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) if 'head.fc.weight' in state_dict: return state_dict out_dict = {} for k, v in state_dict.items(): if 'tau' in k: # convert old tau based checkpoints -> logit_scale (inverse) v = torch.log(1 / v) k = k.replace('tau', 'logit_scale') k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_swin_transformer_v2_cr(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 1, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( SwinTransformerV2Cr, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', **kwargs, } default_cfgs = generate_default_cfgs({ 'swinv2_cr_tiny_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_tiny_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_tiny_ns_224.sw_in1k': _cfg( hf_hub_id='timm/', url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_tiny_ns_224-ba8166c6.pth", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_small_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_small_224.sw_in1k': _cfg( hf_hub_id='timm/', url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_224-0813c165.pth", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_small_ns_224.sw_in1k': _cfg( hf_hub_id='timm/', url="https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-swinv2/swin_v2_cr_small_ns_224_iv-2ce90f8e.pth", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_small_ns_256.untrained': _cfg( url="", input_size=(3, 256, 256), crop_pct=1.0, pool_size=(8, 8)), 'swinv2_cr_base_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_base_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_base_ns_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_large_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_large_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_huge_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_huge_224.untrained': _cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), 'swinv2_cr_giant_384.untrained': _cfg( url="", input_size=(3, 384, 384), crop_pct=1.0, pool_size=(12, 12)), 'swinv2_cr_giant_224.untrained': 
_cfg( url="", input_size=(3, 224, 224), crop_pct=0.9), }) @register_model def swinv2_cr_tiny_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-T V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), ) return _create_swin_transformer_v2_cr('swinv2_cr_tiny_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_tiny_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-T V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), ) return _create_swin_transformer_v2_cr('swinv2_cr_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_tiny_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-T V2 CR @ 224x224, trained ImageNet-1k w/ extra stage norms. ** Experimental, may make default if results are improved. ** """ model_args = dict( embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), extra_norm_stage=True, ) return _create_swin_transformer_v2_cr('swinv2_cr_tiny_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-S V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), ) return _create_swin_transformer_v2_cr('swinv2_cr_small_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-S V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), ) return _create_swin_transformer_v2_cr('swinv2_cr_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-S V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), extra_norm_stage=True, ) return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_small_ns_256(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-S V2 CR @ 256x256, trained ImageNet-1k""" model_args = dict( embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), extra_norm_stage=True, ) return _create_swin_transformer_v2_cr('swinv2_cr_small_ns_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_base_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-B V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), ) return _create_swin_transformer_v2_cr('swinv2_cr_base_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_base_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-B V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), ) return _create_swin_transformer_v2_cr('swinv2_cr_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_base_ns_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-B V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), extra_norm_stage=True, ) return 
_create_swin_transformer_v2_cr('swinv2_cr_base_ns_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_large_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-L V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), ) return _create_swin_transformer_v2_cr('swinv2_cr_large_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_large_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-L V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), ) return _create_swin_transformer_v2_cr('swinv2_cr_large_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_huge_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-H V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=352, depths=(2, 2, 18, 2), num_heads=(11, 22, 44, 88), # head count not certain for Huge, 384 & 224 trying diff values extra_norm_period=6, ) return _create_swin_transformer_v2_cr('swinv2_cr_huge_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_huge_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-H V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=352, depths=(2, 2, 18, 2), num_heads=(8, 16, 32, 64), # head count not certain for Huge, 384 & 224 trying diff values extra_norm_period=6, ) return _create_swin_transformer_v2_cr('swinv2_cr_huge_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_giant_384(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-G V2 CR @ 384x384, trained ImageNet-1k""" model_args = dict( embed_dim=512, depths=(2, 2, 42, 2), num_heads=(16, 32, 64, 128), extra_norm_period=6, ) return _create_swin_transformer_v2_cr('swinv2_cr_giant_384', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def swinv2_cr_giant_224(pretrained=False, **kwargs) -> SwinTransformerV2Cr: """Swin-G V2 CR @ 224x224, trained ImageNet-1k""" model_args = dict( embed_dim=512, depths=(2, 2, 42, 2), num_heads=(16, 32, 64, 128), extra_norm_period=6, ) return _create_swin_transformer_v2_cr('swinv2_cr_giant_224', pretrained=pretrained, **dict(model_args, **kwargs))
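# Editorial sketch, not part of the original timm file: a quick shape check
# for one of the registered entrypoints above; pretrained=False avoids any
# weight download and exercises only code defined in this module.
if __name__ == '__main__':
    _model = swinv2_cr_tiny_224(pretrained=False)
    _out = _model(torch.randn(1, 3, 224, 224))
    print(_out.shape)  # expected: torch.Size([1, 1000])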
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/tnt.py
""" Transformer in Transformer (TNT) in PyTorch A PyTorch implement of TNT as described in 'Transformer in Transformer' - https://arxiv.org/abs/2103.00112 The official mindspore code is released and available at https://gitee.com/mindspore/mindspore/tree/master/model_zoo/research/cv/TNT """ import math import torch import torch.nn as nn from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, trunc_normal_, _assert, to_2tuple from ._builder import build_model_with_cfg from ._registry import register_model from .vision_transformer import resize_pos_embed __all__ = ['TNT'] # model_registry will add each entrypoint fn to this def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'pixel_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = { 'tnt_s_patch16_224': _cfg( url='https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), ), 'tnt_b_patch16_224': _cfg( mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), ), } class Attention(nn.Module): """ Multi-Head Attention """ def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.hidden_dim = hidden_dim self.num_heads = num_heads head_dim = hidden_dim // num_heads self.head_dim = head_dim self.scale = head_dim ** -0.5 self.qk = nn.Linear(dim, hidden_dim * 2, bias=qkv_bias) self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop, inplace=True) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop, inplace=True) def forward(self, x): B, N, C = x.shape qk = self.qk(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k = qk.unbind(0) # make torchscript happy (cannot use tensor as tuple) v = self.v(x).reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): """ TNT Block """ def __init__( self, dim, dim_out, num_pixel, num_heads_in=4, num_heads_out=12, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() # Inner transformer self.norm_in = norm_layer(dim) self.attn_in = Attention( dim, dim, num_heads=num_heads_in, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.norm_mlp_in = norm_layer(dim) self.mlp_in = Mlp( in_features=dim, hidden_features=int(dim * 4), out_features=dim, act_layer=act_layer, drop=proj_drop, ) self.norm1_proj = norm_layer(dim) self.proj = nn.Linear(dim * num_pixel, dim_out, bias=True) # Outer transformer self.norm_out = norm_layer(dim_out) self.attn_out = Attention( dim_out, dim_out, num_heads=num_heads_out, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm_mlp = norm_layer(dim_out) self.mlp = Mlp( in_features=dim_out, hidden_features=int(dim_out * mlp_ratio), out_features=dim_out, act_layer=act_layer, drop=proj_drop, ) def forward(self, pixel_embed, patch_embed): # inner pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed))) pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed))) # outer B, N, C = patch_embed.size() patch_embed = torch.cat( [patch_embed[:, 0:1], patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape(B, N - 1, -1))], dim=1) patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed))) patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed))) return pixel_embed, patch_embed class PixelEmbed(nn.Module): """ Image to Pixel Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) # grid_size property necessary for resizing positional embedding self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) num_patches = (self.grid_size[0]) * (self.grid_size[1]) self.img_size = img_size self.num_patches = num_patches self.in_dim = in_dim new_patch_size = [math.ceil(ps / stride) for ps in patch_size] self.new_patch_size = new_patch_size self.proj = nn.Conv2d(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride) self.unfold = nn.Unfold(kernel_size=new_patch_size, stride=new_patch_size) def forward(self, x, pixel_pos): B, C, H, W = x.shape _assert(H == self.img_size[0], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") _assert(W == self.img_size[1], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]}).") x = self.proj(x) x = self.unfold(x) x = x.transpose(1, 2).reshape(B * self.num_patches, self.in_dim, self.new_patch_size[0], self.new_patch_size[1]) x = x + pixel_pos x = x.reshape(B * self.num_patches, self.in_dim, -1).transpose(1, 2) return x class TNT(nn.Module): """ Transformer in Transformer - https://arxiv.org/abs/2103.00112 """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, inner_dim=48, depth=12, num_heads_inner=4, num_heads_outer=12, mlp_ratio=4., qkv_bias=False, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4, ): super().__init__() assert global_pool in ('', 'token', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.grad_checkpointing = False self.pixel_embed = PixelEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=inner_dim, stride=first_stride, ) num_patches = self.pixel_embed.num_patches self.num_patches = num_patches new_patch_size = self.pixel_embed.new_patch_size num_pixel = new_patch_size[0] * new_patch_size[1] self.norm1_proj = norm_layer(num_pixel * inner_dim) self.proj = nn.Linear(num_pixel * inner_dim, embed_dim) self.norm2_proj = norm_layer(embed_dim) self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) self.patch_pos = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) self.pixel_pos = nn.Parameter(torch.zeros(1, inner_dim, new_patch_size[0], new_patch_size[1])) self.pos_drop = nn.Dropout(p=pos_drop_rate) dpr = [x.item() for x in 
torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule blocks = [] for i in range(depth): blocks.append(Block( dim=inner_dim, dim_out=embed_dim, num_pixel=num_pixel, num_heads_in=num_heads_inner, num_heads_out=num_heads_outer, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, )) self.blocks = nn.ModuleList(blocks) self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.cls_token, std=.02) trunc_normal_(self.patch_pos, std=.02) trunc_normal_(self.pixel_pos, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) @torch.jit.ignore def no_weight_decay(self): return {'patch_pos', 'pixel_pos', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^cls_token|patch_pos|pixel_pos|pixel_embed|norm[12]_proj|proj', # stem and embed / pos blocks=[ (r'^blocks\.(\d+)', None), (r'^norm', (99999,)), ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'token', 'avg') self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): B = x.shape[0] pixel_embed = self.pixel_embed(x, self.pixel_pos) patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) patch_embed = torch.cat((self.cls_token.expand(B, -1, -1), patch_embed), dim=1) patch_embed = patch_embed + self.patch_pos patch_embed = self.pos_drop(patch_embed) if self.grad_checkpointing and not torch.jit.is_scripting(): for blk in self.blocks: pixel_embed, patch_embed = checkpoint(blk, pixel_embed, patch_embed) else: for blk in self.blocks: pixel_embed, patch_embed = blk(pixel_embed, patch_embed) patch_embed = self.norm(patch_embed) return patch_embed def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ convert patch embedding weight from manual patchify + linear proj to conv""" if state_dict['patch_pos'].shape != model.patch_pos.shape: state_dict['patch_pos'] = resize_pos_embed(state_dict['patch_pos'], model.patch_pos, getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size) return state_dict def _create_tnt(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg( TNT, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs) return model @register_model def tnt_s_patch16_224(pretrained=False, **kwargs) -> TNT: model_cfg = dict( patch_size=16, embed_dim=384, inner_dim=24, depth=12, num_heads_outer=6, qkv_bias=False) model = _create_tnt('tnt_s_patch16_224', 
pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def tnt_b_patch16_224(pretrained=False, **kwargs) -> TNT: model_cfg = dict( patch_size=16, embed_dim=640, inner_dim=40, depth=12, num_heads_outer=10, qkv_bias=False) model = _create_tnt('tnt_b_patch16_224', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model
0
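A small usage sketch for the TNT entrypoints above, assuming timm with these registrations is installed; the default config fixes the input size at 224x224, and the token shape shown is what tnt_s_patch16_224 (embed_dim=384, 14x14 patches plus a class token) should produce.

import torch
import timm

model = timm.create_model('tnt_s_patch16_224', pretrained=False)
model.eval()
x = torch.randn(2, 3, 224, 224)  # fixed_input_size=True in the default cfg
with torch.no_grad():
    tokens = model.forward_features(x)  # outer patch tokens incl. the class token
    logits = model(x)
print(tokens.shape)  # expected: torch.Size([2, 197, 384])
print(logits.shape)  # expected: torch.Size([2, 1000])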
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/tresnet.py
""" TResNet: High Performance GPU-Dedicated Architecture https://arxiv.org/pdf/2003.13630.pdf Original model: https://github.com/mrT23/TResNet """ from collections import OrderedDict from functools import partial import torch import torch.nn as nn from timm.layers import SpaceToDepth, BlurPool2d, ClassifierHead, SEModule,\ ConvNormActAa, ConvNormAct, DropPath from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['TResNet'] # model_registry will add each entrypoint fn to this class BasicBlock(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None, drop_path_rate=0. ): super(BasicBlock, self).__init__() self.downsample = downsample self.stride = stride act_layer = partial(nn.LeakyReLU, negative_slope=1e-3) if stride == 1: self.conv1 = ConvNormAct(inplanes, planes, kernel_size=3, stride=1, act_layer=act_layer) else: self.conv1 = ConvNormActAa( inplanes, planes, kernel_size=3, stride=2, act_layer=act_layer, aa_layer=aa_layer) self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=1, apply_act=False, act_layer=None) self.act = nn.ReLU(inplace=True) rd_chs = max(planes * self.expansion // 4, 64) self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, use_se=True, act_layer=None, aa_layer=None, drop_path_rate=0., ): super(Bottleneck, self).__init__() self.downsample = downsample self.stride = stride act_layer = act_layer or partial(nn.LeakyReLU, negative_slope=1e-3) self.conv1 = ConvNormAct( inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer) if stride == 1: self.conv2 = ConvNormAct( planes, planes, kernel_size=3, stride=1, act_layer=act_layer) else: self.conv2 = ConvNormActAa( planes, planes, kernel_size=3, stride=2, act_layer=act_layer, aa_layer=aa_layer) reduction_chs = max(planes * self.expansion // 8, 64) self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None self.conv3 = ConvNormAct( planes, planes * self.expansion, kernel_size=1, stride=1, apply_act=False, act_layer=None) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.act = nn.ReLU(inplace=True) def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.conv3(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class TResNet(nn.Module): def __init__( self, layers, in_chans=3, num_classes=1000, width_factor=1.0, v2=False, global_pool='fast', drop_rate=0., drop_path_rate=0., ): self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False super(TResNet, self).__init__() aa_layer = BlurPool2d act_layer = nn.LeakyReLU # TResnet stages self.inplanes = int(64 * width_factor) self.planes = int(64 * width_factor) if v2: self.inplanes = self.inplanes // 8 * 8 self.planes = self.planes // 8 * 8 dpr = [x.tolist() for x in 
torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] conv1 = ConvNormAct(in_chans * 16, self.planes, stride=1, kernel_size=3, act_layer=act_layer) layer1 = self._make_layer( Bottleneck if v2 else BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[0]) layer2 = self._make_layer( Bottleneck if v2 else BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[1]) layer3 = self._make_layer( Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[2]) layer4 = self._make_layer( Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer, drop_path_rate=dpr[3]) # body self.body = nn.Sequential(OrderedDict([ ('s2d', SpaceToDepth()), ('conv1', conv1), ('layer1', layer1), ('layer2', layer2), ('layer3', layer3), ('layer4', layer4), ])) self.feature_info = [ dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'), dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'), dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), ] # head self.num_features = (self.planes * 8) * Bottleneck.expansion self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) # model initialization for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') if isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) # residual connections special initialization for m in self.modules(): if isinstance(m, BasicBlock): nn.init.zeros_(m.conv2.bn.weight) if isinstance(m, Bottleneck): nn.init.zeros_(m.conv3.bn.weight) def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None, drop_path_rate=0.): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: layers = [] if stride == 2: # avg pooling before 1x1 conv layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) layers += [ConvNormAct( self.inplanes, planes * block.expansion, kernel_size=1, stride=1, apply_act=False, act_layer=None)] downsample = nn.Sequential(*layers) layers = [] for i in range(blocks): layers.append(block( self.inplanes, planes, stride=stride if i == 0 else 1, downsample=downsample if i == 0 else None, use_se=use_se, aa_layer=aa_layer, drop_path_rate=drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate, )) self.inplanes = planes * block.expansion return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem=r'^body\.conv1', blocks=r'^body\.layer(\d+)' if coarse else r'^body\.layer(\d+)\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): if self.grad_checkpointing and not torch.jit.is_scripting(): x = self.body.s2d(x) x = self.body.conv1(x) x = checkpoint_seq([ self.body.layer1, self.body.layer2, self.body.layer3, self.body.layer4], x, flatten=True) else: x = self.body(x) return x def 
forward_head(self, x, pre_logits: bool = False): return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'body.conv1.conv.weight' in state_dict: return state_dict import re state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) out_dict = {} for k, v in state_dict.items(): k = re.sub(r'conv(\d+)\.0.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = re.sub(r'conv(\d+)\.0.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub(r'conv(\d+)\.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = re.sub(r'conv(\d+)\.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub(r'downsample\.(\d+)\.0', lambda x: f'downsample.{int(x.group(1))}.conv', k) k = re.sub(r'downsample\.(\d+)\.1', lambda x: f'downsample.{int(x.group(1))}.bn', k) if k.endswith('bn.weight'): # convert weight from inplace_abn to batchnorm v = v.abs().add(1e-5) out_dict[k] = v return out_dict def _create_tresnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( TResNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': (0., 0., 0.), 'std': (1., 1., 1.), 'first_conv': 'body.conv1.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'tresnet_m.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), 'tresnet_m.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_l.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_xl.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_l.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_xl.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), }) @register_model def tresnet_m(pretrained=False, **kwargs) -> TResNet: model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) return _create_tresnet('tresnet_m', pretrained=pretrained, **model_kwargs) @register_model def tresnet_l(pretrained=False, **kwargs) -> TResNet: model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) return _create_tresnet('tresnet_l', pretrained=pretrained, **model_kwargs) @register_model def tresnet_xl(pretrained=False, **kwargs) -> TResNet: model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) return _create_tresnet('tresnet_xl', pretrained=pretrained, **model_kwargs) @register_model def tresnet_v2_l(pretrained=False, **kwargs) -> TResNet: model_kwargs = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True, **kwargs) return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **model_kwargs) register_model_deprecations(__name__, { 'tresnet_m_miil_in21k': 'tresnet_m.miil_in21k', 'tresnet_m_448': 'tresnet_m.miil_in1k_448', 'tresnet_l_448': 'tresnet_l.miil_in1k_448', 'tresnet_xl_448': 'tresnet_xl.miil_in1k_448', })
0
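Because _create_tresnet passes feature_cfg with out_indices (1, 2, 3, 4), the TResNet entrypoints above can also be built as feature backbones. A minimal sketch, assuming timm is importable; channel and stride values are read from feature_info rather than hard-coded.

import torch
import timm

# standard classification model
model = timm.create_model('tresnet_m', pretrained=False)

# feature-extraction variant built via the registered feature_cfg
backbone = timm.create_model('tresnet_m', pretrained=False, features_only=True)
backbone.eval()
print(backbone.feature_info.channels())   # channels of each returned feature map
print(backbone.feature_info.reduction())  # strides 4/8/16/32 per the feature_info entries
with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))
for f in feats:
    print(tuple(f.shape))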
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/twins.py
""" Twins A PyTorch impl of : `Twins: Revisiting the Design of Spatial Attention in Vision Transformers` - https://arxiv.org/pdf/2104.13840.pdf Code/weights from https://github.com/Meituan-AutoML/Twins, original copyright/license info below """ # -------------------------------------------------------- # Twins # Copyright (c) 2021 Meituan # Licensed under The Apache 2.0 License [see LICENSE for details] # Written by Xinjie Li, Xiangxiang Chu # -------------------------------------------------------- import math from functools import partial from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, to_2tuple, trunc_normal_, use_fused_attn from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs from .vision_transformer import Attention __all__ = ['Twins'] # model_registry will add each entrypoint fn to this Size_ = Tuple[int, int] @register_notrace_module # reason: FX can't symbolically trace control flow in forward method class LocallyGroupedAttn(nn.Module): """ LSA: self attention within a group """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., ws=1): assert ws != 1 super(LocallyGroupedAttn, self).__init__() assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." self.dim = dim self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=True) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.ws = ws def forward(self, x, size: Size_): # There are two implementations for this function, zero padding or mask. We don't observe obvious difference for # both. You can choose any one, we recommend forward_padding because it's neat. However, # the masking implementation is more reasonable and accurate. 
B, N, C = x.shape H, W = size x = x.view(B, H, W, C) pad_l = pad_t = 0 pad_r = (self.ws - W % self.ws) % self.ws pad_b = (self.ws - H % self.ws) % self.ws x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) _, Hp, Wp, _ = x.shape _h, _w = Hp // self.ws, Wp // self.ws x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) qkv = self.qkv(x).reshape( B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) q, k, v = qkv.unbind(0) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p, ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) x = x.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) if pad_r > 0 or pad_b > 0: x = x[:, :H, :W, :].contiguous() x = x.reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x # def forward_mask(self, x, size: Size_): # B, N, C = x.shape # H, W = size # x = x.view(B, H, W, C) # pad_l = pad_t = 0 # pad_r = (self.ws - W % self.ws) % self.ws # pad_b = (self.ws - H % self.ws) % self.ws # x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) # _, Hp, Wp, _ = x.shape # _h, _w = Hp // self.ws, Wp // self.ws # mask = torch.zeros((1, Hp, Wp), device=x.device) # mask[:, -pad_b:, :].fill_(1) # mask[:, :, -pad_r:].fill_(1) # # x = x.reshape(B, _h, self.ws, _w, self.ws, C).transpose(2, 3) # B, _h, _w, ws, ws, C # mask = mask.reshape(1, _h, self.ws, _w, self.ws).transpose(2, 3).reshape(1, _h * _w, self.ws * self.ws) # attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) # 1, _h*_w, ws*ws, ws*ws # attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-1000.0)).masked_fill(attn_mask == 0, float(0.0)) # qkv = self.qkv(x).reshape( # B, _h * _w, self.ws * self.ws, 3, self.num_heads, C // self.num_heads).permute(3, 0, 1, 4, 2, 5) # # n_h, B, _w*_h, nhead, ws*ws, dim # q, k, v = qkv[0], qkv[1], qkv[2] # B, _h*_w, n_head, ws*ws, dim_head # attn = (q @ k.transpose(-2, -1)) * self.scale # B, _h*_w, n_head, ws*ws, ws*ws # attn = attn + attn_mask.unsqueeze(2) # attn = attn.softmax(dim=-1) # attn = self.attn_drop(attn) # attn @v -> B, _h*_w, n_head, ws*ws, dim_head # attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.ws, self.ws, C) # x = attn.transpose(2, 3).reshape(B, _h * self.ws, _w * self.ws, C) # if pad_r > 0 or pad_b > 0: # x = x[:, :H, :W, :].contiguous() # x = x.reshape(B, N, C) # x = self.proj(x) # x = self.proj_drop(x) # return x class GlobalSubSampleAttn(nn.Module): """ GSA: using a key to summarize the information for a group to be efficient. """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, attn_drop=0., proj_drop=0., sr_ratio=1): super().__init__() assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}." 
self.dim = dim self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.q = nn.Linear(dim, dim, bias=True) self.kv = nn.Linear(dim, dim * 2, bias=True) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.sr_ratio = sr_ratio if sr_ratio > 1: self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) self.norm = nn.LayerNorm(dim) else: self.sr = None self.norm = None def forward(self, x, size: Size_): B, N, C = x.shape q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3) if self.sr is not None: x = x.permute(0, 2, 1).reshape(B, C, *size) x = self.sr(x).reshape(B, C, -1).permute(0, 2, 1) x = self.norm(x) kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) k, v = kv.unbind(0) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p, ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, ws=None, ): super().__init__() self.norm1 = norm_layer(dim) if ws is None: self.attn = Attention(dim, num_heads, False, None, attn_drop, proj_drop) elif ws == 1: self.attn = GlobalSubSampleAttn(dim, num_heads, attn_drop, proj_drop, sr_ratio) else: self.attn = LocallyGroupedAttn(dim, num_heads, attn_drop, proj_drop, ws) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x, size: Size_): x = x + self.drop_path1(self.attn(self.norm1(x), size)) x = x + self.drop_path2(self.mlp(self.norm2(x))) return x class PosConv(nn.Module): # PEG from https://arxiv.org/abs/2102.10882 def __init__(self, in_chans, embed_dim=768, stride=1): super(PosConv, self).__init__() self.proj = nn.Sequential( nn.Conv2d(in_chans, embed_dim, 3, stride, 1, bias=True, groups=embed_dim), ) self.stride = stride def forward(self, x, size: Size_): B, N, C = x.shape cnn_feat_token = x.transpose(1, 2).view(B, C, *size) x = self.proj(cnn_feat_token) if self.stride == 1: x += cnn_feat_token x = x.flatten(2).transpose(1, 2) return x def no_weight_decay(self): return ['proj.%d.weight' % i for i in range(4)] class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size assert img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0, \ f"img_size {img_size} should be divided by patch_size {patch_size}." 
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1] self.num_patches = self.H * self.W self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) self.norm = nn.LayerNorm(embed_dim) def forward(self, x) -> Tuple[torch.Tensor, Size_]: B, C, H, W = x.shape x = self.proj(x).flatten(2).transpose(1, 2) x = self.norm(x) out_size = (H // self.patch_size[0], W // self.patch_size[1]) return x, out_size class Twins(nn.Module): """ Twins Vision Transfomer (Revisiting Spatial Attention) Adapted from PVT (PyramidVisionTransformer) class at https://github.com/whai362/PVT.git """ def __init__( self, img_size=224, patch_size=4, in_chans=3, num_classes=1000, global_pool='avg', embed_dims=(64, 128, 256, 512), num_heads=(1, 2, 4, 8), mlp_ratios=(4, 4, 4, 4), depths=(3, 4, 6, 3), sr_ratios=(8, 4, 2, 1), wss=None, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6), block_cls=Block, ): super().__init__() self.num_classes = num_classes self.global_pool = global_pool self.depths = depths self.embed_dims = embed_dims self.num_features = embed_dims[-1] self.grad_checkpointing = False img_size = to_2tuple(img_size) prev_chs = in_chans self.patch_embeds = nn.ModuleList() self.pos_drops = nn.ModuleList() for i in range(len(depths)): self.patch_embeds.append(PatchEmbed(img_size, patch_size, prev_chs, embed_dims[i])) self.pos_drops.append(nn.Dropout(p=pos_drop_rate)) prev_chs = embed_dims[i] img_size = tuple(t // patch_size for t in img_size) patch_size = 2 self.blocks = nn.ModuleList() dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule cur = 0 for k in range(len(depths)): _block = nn.ModuleList([block_cls( dim=embed_dims[k], num_heads=num_heads[k], mlp_ratio=mlp_ratios[k], proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer, sr_ratio=sr_ratios[k], ws=1 if wss is None or i % 2 == 1 else wss[k]) for i in range(depths[k])], ) self.blocks.append(_block) cur += depths[k] self.pos_block = nn.ModuleList([PosConv(embed_dim, embed_dim) for embed_dim in embed_dims]) self.norm = norm_layer(self.num_features) # classification head self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() # init weights self.apply(self._init_weights) @torch.jit.ignore def no_weight_decay(self): return set(['pos_block.' 
+ n for n, p in self.pos_block.named_parameters()]) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embeds.0', # stem and embed blocks=[ (r'^(?:blocks|patch_embeds|pos_block)\.(\d+)', None), ('^norm', (99999,)) ] if coarse else [ (r'^blocks\.(\d+)\.(\d+)', None), (r'^(?:patch_embeds|pos_block)\.(\d+)', (0,)), (r'^norm', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels fan_out //= m.groups m.weight.data.normal_(0, math.sqrt(2.0 / fan_out)) if m.bias is not None: m.bias.data.zero_() def forward_features(self, x): B = x.shape[0] for i, (embed, drop, blocks, pos_blk) in enumerate( zip(self.patch_embeds, self.pos_drops, self.blocks, self.pos_block)): x, size = embed(x) x = drop(x) for j, blk in enumerate(blocks): x = blk(x, size) if j == 0: x = pos_blk(x, size) # PEG here if i < len(self.depths) - 1: x = x.reshape(B, *size, -1).permute(0, 3, 1, 2).contiguous() x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=1) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_twins(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg(Twins, variant, pretrained, **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embeds.0.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'twins_pcpvt_small.in1k': _cfg(hf_hub_id='timm/'), 'twins_pcpvt_base.in1k': _cfg(hf_hub_id='timm/'), 'twins_pcpvt_large.in1k': _cfg(hf_hub_id='timm/'), 'twins_svt_small.in1k': _cfg(hf_hub_id='timm/'), 'twins_svt_base.in1k': _cfg(hf_hub_id='timm/'), 'twins_svt_large.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def twins_pcpvt_small(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_pcpvt_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_pcpvt_base(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_pcpvt_base', pretrained=pretrained, **dict(model_args, 
**kwargs)) @register_model def twins_pcpvt_large(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4], depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_pcpvt_large', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_svt_small(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[64, 128, 256, 512], num_heads=[2, 4, 8, 16], mlp_ratios=[4, 4, 4, 4], depths=[2, 2, 10, 4], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_svt_small', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_svt_base(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[96, 192, 384, 768], num_heads=[3, 6, 12, 24], mlp_ratios=[4, 4, 4, 4], depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_svt_base', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def twins_svt_large(pretrained=False, **kwargs) -> Twins: model_args = dict( patch_size=4, embed_dims=[128, 256, 512, 1024], num_heads=[4, 8, 16, 32], mlp_ratios=[4, 4, 4, 4], depths=[2, 2, 18, 2], wss=[7, 7, 7, 7], sr_ratios=[8, 4, 2, 1]) return _create_twins('twins_svt_large', pretrained=pretrained, **dict(model_args, **kwargs))
0
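A minimal sketch for the Twins entrypoints above, assuming timm is importable. Note that _create_twins raises a RuntimeError for features_only, so only the classification path is exercised; pre_logits returns the pooled embedding before the head.

import torch
import timm

model = timm.create_model('twins_pcpvt_small', pretrained=False)
model.eval()
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    pooled = model.forward_head(model.forward_features(x), pre_logits=True)
    logits = model(x)
print(pooled.shape)  # expected: torch.Size([1, 512]) -> last-stage embed dim
print(logits.shape)  # expected: torch.Size([1, 1000])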
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/vgg.py
"""VGG Adapted from https://github.com/pytorch/vision 'vgg.py' (BSD-3-Clause) with a few changes for timm functionality. Copyright 2021 Ross Wightman """ from typing import Union, List, Dict, Any, cast import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ClassifierHead from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs __all__ = ['VGG'] cfgs: Dict[str, List[Union[str, int]]] = { 'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], 'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], } @register_notrace_module # reason: FX can't symbolically trace control flow in forward method class ConvMlp(nn.Module): def __init__( self, in_features=512, out_features=4096, kernel_size=7, mlp_ratio=1.0, drop_rate: float = 0.2, act_layer: nn.Module = None, conv_layer: nn.Module = None, ): super(ConvMlp, self).__init__() self.input_kernel_size = kernel_size mid_features = int(out_features * mlp_ratio) self.fc1 = conv_layer(in_features, mid_features, kernel_size, bias=True) self.act1 = act_layer(True) self.drop = nn.Dropout(drop_rate) self.fc2 = conv_layer(mid_features, out_features, 1, bias=True) self.act2 = act_layer(True) def forward(self, x): if x.shape[-2] < self.input_kernel_size or x.shape[-1] < self.input_kernel_size: # keep the input size >= 7x7 output_size = (max(self.input_kernel_size, x.shape[-2]), max(self.input_kernel_size, x.shape[-1])) x = F.adaptive_avg_pool2d(x, output_size) x = self.fc1(x) x = self.act1(x) x = self.drop(x) x = self.fc2(x) x = self.act2(x) return x class VGG(nn.Module): def __init__( self, cfg: List[Any], num_classes: int = 1000, in_chans: int = 3, output_stride: int = 32, mlp_ratio: float = 1.0, act_layer: nn.Module = nn.ReLU, conv_layer: nn.Module = nn.Conv2d, norm_layer: nn.Module = None, global_pool: str = 'avg', drop_rate: float = 0., ) -> None: super(VGG, self).__init__() assert output_stride == 32 self.num_classes = num_classes self.num_features = 4096 self.drop_rate = drop_rate self.grad_checkpointing = False self.use_norm = norm_layer is not None self.feature_info = [] prev_chs = in_chans net_stride = 1 pool_layer = nn.MaxPool2d layers: List[nn.Module] = [] for v in cfg: last_idx = len(layers) - 1 if v == 'M': self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{last_idx}')) layers += [pool_layer(kernel_size=2, stride=2)] net_stride *= 2 else: v = cast(int, v) conv2d = conv_layer(prev_chs, v, kernel_size=3, padding=1) if norm_layer is not None: layers += [conv2d, norm_layer(v), act_layer(inplace=True)] else: layers += [conv2d, act_layer(inplace=True)] prev_chs = v self.features = nn.Sequential(*layers) self.feature_info.append(dict(num_chs=prev_chs, reduction=net_stride, module=f'features.{len(layers) - 1}')) self.pre_logits = ConvMlp( prev_chs, self.num_features, 7, mlp_ratio=mlp_ratio, drop_rate=drop_rate, act_layer=act_layer, conv_layer=conv_layer, ) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) self._initialize_weights() @torch.jit.ignore def group_matcher(self, coarse=False): # this treats BN layers as 
separate groups for bn variants, a lot of effort to fix that return dict(stem=r'^features\.0', blocks=r'^features\.(\d+)') @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.head = ClassifierHead( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=self.drop_rate, ) def forward_features(self, x: torch.Tensor) -> torch.Tensor: x = self.features(x) return x def forward_head(self, x: torch.Tensor, pre_logits: bool = False): x = self.pre_logits(x) return x if pre_logits else self.head(x) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.forward_features(x) x = self.forward_head(x) return x def _initialize_weights(self) -> None: for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.normal_(m.weight, 0, 0.01) nn.init.constant_(m.bias, 0) def _filter_fn(state_dict): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} for k, v in state_dict.items(): k_r = k k_r = k_r.replace('classifier.0', 'pre_logits.fc1') k_r = k_r.replace('classifier.3', 'pre_logits.fc2') k_r = k_r.replace('classifier.6', 'head.fc') if 'classifier.0.weight' in k: v = v.reshape(-1, 512, 7, 7) if 'classifier.3.weight' in k: v = v.reshape(-1, 4096, 1, 1) out_dict[k_r] = v return out_dict def _create_vgg(variant: str, pretrained: bool, **kwargs: Any) -> VGG: cfg = variant.split('_')[0] # NOTE: VGG is one of few models with stride==1 features w/ 6 out_indices [0..5] out_indices = kwargs.pop('out_indices', (0, 1, 2, 3, 4, 5)) model = build_model_with_cfg( VGG, variant, pretrained, model_cfg=cfgs[cfg], feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), pretrained_filter_fn=_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'features.0', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'vgg11.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg13.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg16.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg19.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg11_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg13_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg16_bn.tv_in1k': _cfg(hf_hub_id='timm/'), 'vgg19_bn.tv_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def vgg11(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 11-layer model (configuration "A") from `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg11', pretrained=pretrained, **model_args) @register_model def vgg11_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 11-layer model (configuration "A") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg11_bn', pretrained=pretrained, 
**model_args) @register_model def vgg13(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 13-layer model (configuration "B") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg13', pretrained=pretrained, **model_args) @register_model def vgg13_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 13-layer model (configuration "B") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg13_bn', pretrained=pretrained, **model_args) @register_model def vgg16(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 16-layer model (configuration "D") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg16', pretrained=pretrained, **model_args) @register_model def vgg16_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 16-layer model (configuration "D") with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg16_bn', pretrained=pretrained, **model_args) @register_model def vgg19(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 19-layer model (configuration "E") `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(**kwargs) return _create_vgg('vgg19', pretrained=pretrained, **model_args) @register_model def vgg19_bn(pretrained: bool = False, **kwargs: Any) -> VGG: r"""VGG 19-layer model (configuration 'E') with batch normalization `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`._ """ model_args = dict(norm_layer=nn.BatchNorm2d, **kwargs) return _create_vgg('vgg19_bn', pretrained=pretrained, **model_args)
0
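A brief sketch for the VGG entrypoints above, assuming timm is importable; it shows swapping the classifier for transfer learning and the six feature levels noted in _create_vgg (out_indices 0..5, including the stride-1 stem features).

import torch
import timm

model = timm.create_model('vgg16_bn', pretrained=False)
model.reset_classifier(num_classes=10)  # replace the head for a 10-class task
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([1, 10])

# feature backbone: VGG exposes six levels, starting at stride 1
backbone = timm.create_model('vgg16_bn', pretrained=False, features_only=True)
print(backbone.feature_info.reduction())  # expected: [1, 2, 4, 8, 16, 32]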
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/visformer.py
""" Visformer Paper: Visformer: The Vision-friendly Transformer - https://arxiv.org/abs/2104.12533 From original at https://github.com/danczs/Visformer Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import to_2tuple, trunc_normal_, DropPath, PatchEmbed, LayerNorm2d, create_classifier, use_fused_attn from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['Visformer'] class SpatialMlp(nn.Module): def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., group=8, spatial_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features drop_probs = to_2tuple(drop) self.in_features = in_features self.out_features = out_features self.spatial_conv = spatial_conv if self.spatial_conv: if group < 2: # net setting hidden_features = in_features * 5 // 6 else: hidden_features = in_features * 2 self.hidden_features = hidden_features self.group = group self.conv1 = nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0, bias=False) self.act1 = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) if self.spatial_conv: self.conv2 = nn.Conv2d( hidden_features, hidden_features, 3, stride=1, padding=1, groups=self.group, bias=False) self.act2 = act_layer() else: self.conv2 = None self.act2 = None self.conv3 = nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0, bias=False) self.drop3 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.conv1(x) x = self.act1(x) x = self.drop1(x) if self.conv2 is not None: x = self.conv2(x) x = self.act2(x) x = self.conv3(x) x = self.drop3(x) return x class Attention(nn.Module): fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, head_dim_ratio=1., attn_drop=0., proj_drop=0.): super().__init__() self.dim = dim self.num_heads = num_heads head_dim = round(dim // num_heads * head_dim_ratio) self.head_dim = head_dim self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn(experimental=True) self.qkv = nn.Conv2d(dim, head_dim * num_heads * 3, 1, stride=1, padding=0, bias=False) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Conv2d(self.head_dim * self.num_heads, dim, 1, stride=1, padding=0, bias=False) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, C, H, W = x.shape x = self.qkv(x).reshape(B, 3, self.num_heads, self.head_dim, -1).permute(1, 0, 2, 4, 3) q, k, v = x.unbind(0) if self.fused_attn: x = torch.nn.functional.scaled_dot_product_attention( q.contiguous(), k.contiguous(), v.contiguous(), dropout_p=self.attn_drop.p, ) else: attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.permute(0, 1, 3, 2).reshape(B, -1, H, W) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__( self, dim, num_heads, head_dim_ratio=1., mlp_ratio=4., proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=LayerNorm2d, group=8, attn_disabled=False, spatial_conv=False, ): super().__init__() self.spatial_conv = spatial_conv self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() if attn_disabled: self.norm1 = None self.attn = None else: self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, head_dim_ratio=head_dim_ratio, attn_drop=attn_drop, proj_drop=proj_drop, ) self.norm2 = norm_layer(dim) self.mlp = SpatialMlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, group=group, spatial_conv=spatial_conv, ) def forward(self, x): if self.attn is not None: x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class Visformer(nn.Module): def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=LayerNorm2d, attn_stage='111', use_pos_embed=True, spatial_conv='111', vit_stem=False, group=8, global_pool='avg', conv_init=False, embed_norm=None, ): super().__init__() img_size = to_2tuple(img_size) self.num_classes = num_classes self.embed_dim = embed_dim self.init_channels = init_channels self.img_size = img_size self.vit_stem = vit_stem self.conv_init = conv_init if isinstance(depth, (list, tuple)): self.stage_num1, self.stage_num2, self.stage_num3 = depth depth = sum(depth) else: self.stage_num1 = self.stage_num3 = depth // 3 self.stage_num2 = depth - self.stage_num1 - self.stage_num3 self.use_pos_embed = use_pos_embed self.grad_checkpointing = False dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stage 1 if self.vit_stem: self.stem = None self.patch_embed1 = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=embed_norm, flatten=False, ) img_size = [x // patch_size for x in img_size] else: if self.init_channels is None: self.stem = None self.patch_embed1 = PatchEmbed( img_size=img_size, patch_size=patch_size // 2, in_chans=in_chans, embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False, ) img_size = [x // (patch_size // 2) for x in img_size] else: self.stem = nn.Sequential( nn.Conv2d(in_chans, self.init_channels, 7, stride=2, padding=3, bias=False), nn.BatchNorm2d(self.init_channels), nn.ReLU(inplace=True) ) img_size = [x // 2 for x in img_size] self.patch_embed1 = PatchEmbed( img_size=img_size, patch_size=patch_size // 4, in_chans=self.init_channels, embed_dim=embed_dim // 2, norm_layer=embed_norm, flatten=False, ) img_size = [x // (patch_size // 4) for x in img_size] if self.use_pos_embed: if self.vit_stem: self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) else: self.pos_embed1 = nn.Parameter(torch.zeros(1, embed_dim//2, *img_size)) self.pos_drop = nn.Dropout(p=pos_drop_rate) else: self.pos_embed1 = None self.stage1 = nn.Sequential(*[ Block( dim=embed_dim//2, num_heads=num_heads, head_dim_ratio=0.5, mlp_ratio=mlp_ratio, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, group=group, attn_disabled=(attn_stage[0] == '0'), spatial_conv=(spatial_conv[0] == '1'), ) for i in range(self.stage_num1) ]) # stage2 if not self.vit_stem: self.patch_embed2 = PatchEmbed( img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim // 2, embed_dim=embed_dim, norm_layer=embed_norm, flatten=False, ) img_size = [x // (patch_size // 8) for x in img_size] if self.use_pos_embed: self.pos_embed2 = nn.Parameter(torch.zeros(1, embed_dim, *img_size)) else: self.pos_embed2 = None else: self.patch_embed2 = None self.stage2 = 
nn.Sequential(*[ Block( dim=embed_dim, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, group=group, attn_disabled=(attn_stage[1] == '0'), spatial_conv=(spatial_conv[1] == '1'), ) for i in range(self.stage_num1, self.stage_num1+self.stage_num2) ]) # stage 3 if not self.vit_stem: self.patch_embed3 = PatchEmbed( img_size=img_size, patch_size=patch_size // 8, in_chans=embed_dim, embed_dim=embed_dim * 2, norm_layer=embed_norm, flatten=False, ) img_size = [x // (patch_size // 8) for x in img_size] if self.use_pos_embed: self.pos_embed3 = nn.Parameter(torch.zeros(1, embed_dim*2, *img_size)) else: self.pos_embed3 = None else: self.patch_embed3 = None self.stage3 = nn.Sequential(*[ Block( dim=embed_dim * 2, num_heads=num_heads, head_dim_ratio=1.0, mlp_ratio=mlp_ratio, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, group=group, attn_disabled=(attn_stage[2] == '0'), spatial_conv=(spatial_conv[2] == '1'), ) for i in range(self.stage_num1+self.stage_num2, depth) ]) self.num_features = embed_dim if self.vit_stem else embed_dim * 2 self.norm = norm_layer(self.num_features) # head global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.head = head # weights init if self.use_pos_embed: trunc_normal_(self.pos_embed1, std=0.02) if not self.vit_stem: trunc_normal_(self.pos_embed2, std=0.02) trunc_normal_(self.pos_embed3, std=0.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Conv2d): if self.conv_init: nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') else: trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0.) 
@torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^patch_embed1|pos_embed1|stem', # stem and embed blocks=[ (r'^stage(\d+)\.(\d+)' if coarse else r'^stage(\d+)\.(\d+)', None), (r'^(?:patch_embed|pos_embed)(\d+)', (0,)), (r'^norm', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): if self.stem is not None: x = self.stem(x) # stage 1 x = self.patch_embed1(x) if self.pos_embed1 is not None: x = self.pos_drop(x + self.pos_embed1) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stage1, x) else: x = self.stage1(x) # stage 2 if self.patch_embed2 is not None: x = self.patch_embed2(x) if self.pos_embed2 is not None: x = self.pos_drop(x + self.pos_embed2) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stage2, x) else: x = self.stage2(x) # stage3 if self.patch_embed3 is not None: x = self.patch_embed3(x) if self.pos_embed3 is not None: x = self.pos_drop(x + self.pos_embed3) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stage3, x) else: x = self.stage3(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_visformer(variant, pretrained=False, default_cfg=None, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg(Visformer, variant, pretrained, **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'visformer_tiny.in1k': _cfg(hf_hub_id='timm/'), 'visformer_small.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def visformer_tiny(pretrained=False, **kwargs) -> Visformer: model_cfg = dict( init_channels=16, embed_dim=192, depth=(7, 4, 4), num_heads=3, mlp_ratio=4., group=8, attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, embed_norm=nn.BatchNorm2d) model = _create_visformer('visformer_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model @register_model def visformer_small(pretrained=False, **kwargs) -> Visformer: model_cfg = dict( init_channels=32, embed_dim=384, depth=(7, 4, 4), num_heads=6, mlp_ratio=4., group=8, attn_stage='011', spatial_conv='100', norm_layer=nn.BatchNorm2d, conv_init=True, embed_norm=nn.BatchNorm2d) model = _create_visformer('visformer_small', pretrained=pretrained, **dict(model_cfg, **kwargs)) return model # @register_model # def visformer_net1(pretrained=False, **kwargs): # model = Visformer( # init_channels=None, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', # spatial_conv='000', vit_stem=True, conv_init=True, **kwargs) # model.default_cfg = _cfg() # return model # # # @register_model # def 
visformer_net2(pretrained=False, **kwargs): # model = Visformer( # init_channels=32, embed_dim=384, depth=(0, 12, 0), num_heads=6, mlp_ratio=4., attn_stage='111', # spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) # model.default_cfg = _cfg() # return model # # # @register_model # def visformer_net3(pretrained=False, **kwargs): # model = Visformer( # init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', # spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) # model.default_cfg = _cfg() # return model # # # @register_model # def visformer_net4(pretrained=False, **kwargs): # model = Visformer( # init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., attn_stage='111', # spatial_conv='000', vit_stem=False, conv_init=True, **kwargs) # model.default_cfg = _cfg() # return model # # # @register_model # def visformer_net5(pretrained=False, **kwargs): # model = Visformer( # init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', # spatial_conv='111', vit_stem=False, conv_init=True, **kwargs) # model.default_cfg = _cfg() # return model # # # @register_model # def visformer_net6(pretrained=False, **kwargs): # model = Visformer( # init_channels=32, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4., group=1, attn_stage='111', # pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) # model.default_cfg = _cfg() # return model # # # @register_model # def visformer_net7(pretrained=False, **kwargs): # model = Visformer( # init_channels=32, embed_dim=384, depth=(6, 7, 7), num_heads=6, group=1, attn_stage='000', # pos_embed=False, spatial_conv='111', conv_init=True, **kwargs) # model.default_cfg = _cfg() # return model
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/vision_transformer.py
""" Vision Transformer (ViT) in PyTorch A PyTorch implement of Vision Transformers as described in: 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 `How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` - https://arxiv.org/abs/2106.10270 `FlexiViT: One Model for All Patch Sizes` - https://arxiv.org/abs/2212.08013 The official jax code is released and available at * https://github.com/google-research/vision_transformer * https://github.com/google-research/big_vision Acknowledgments: * The paper authors for releasing code and weights, thanks! * I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch * Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT * Bert reference code checks against Huggingface Transformers and Tensorflow Bert Hacked together by / Copyright 2020, Ross Wightman """ import logging import math from collections import OrderedDict from functools import partial from typing import Callable, List, Optional, Sequence, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from torch.jit import Final from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD, \ OPENAI_CLIP_MEAN, OPENAI_CLIP_STD from timm.layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_, resample_patch_embed, \ resample_abs_pos_embed, RmsNorm, PatchDropout, use_fused_attn, SwiGLUPacked from ._builder import build_model_with_cfg from ._manipulate import named_apply, checkpoint_seq, adapt_input_conv from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['VisionTransformer'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class Attention(nn.Module): fused_attn: Final[bool] def __init__( self, dim, num_heads=8, qkv_bias=False, qk_norm=False, attn_drop=0., proj_drop=0., norm_layer=nn.LayerNorm, ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) q, k = self.q_norm(q), self.k_norm(k) if self.fused_attn: x = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p, ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, proj_drop=0., attn_drop=0., init_values=None, drop_path=0., 
act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = mlp_layer( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x)))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ResPostBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, proj_drop=0., attn_drop=0., init_values=None, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, ): super().__init__() self.init_values = init_values self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.mlp = mlp_layer( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.init_weights() def init_weights(self): # NOTE this init overrides that base model init with specific changes for the block type if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def forward(self, x): x = x + self.drop_path1(self.norm1(self.attn(x))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class ParallelScalingBlock(nn.Module): """ Parallel ViT block (MLP & Attention in parallel) Based on: 'Scaling Vision Transformers to 22 Billion Parameters` - https://arxiv.org/abs/2302.05442 """ fused_attn: Final[bool] def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, proj_drop=0., attn_drop=0., init_values=None, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=None, # NOTE: not used ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() mlp_hidden_dim = int(mlp_ratio * dim) in_proj_out_dim = mlp_hidden_dim + 3 * dim self.in_norm = norm_layer(dim) self.in_proj = nn.Linear(dim, in_proj_out_dim, bias=qkv_bias) self.in_split = [mlp_hidden_dim] + [dim] * 3 if qkv_bias: self.register_buffer('qkv_bias', None) self.register_parameter('mlp_bias', None) else: self.register_buffer('qkv_bias', torch.zeros(3 * dim), persistent=False) self.mlp_bias = nn.Parameter(torch.zeros(mlp_hidden_dim)) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.attn_out_proj = nn.Linear(dim, dim) self.mlp_drop = nn.Dropout(proj_drop) self.mlp_act = act_layer() self.mlp_out_proj = nn.Linear(mlp_hidden_dim, dim) self.ls = LayerScale(dim, init_values=init_values) if init_values 
is not None else nn.Identity() self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): B, N, C = x.shape # Combined MLP fc1 & qkv projections y = self.in_norm(x) if self.mlp_bias is not None: # Concat constant zero-bias for qkv w/ trainable mlp_bias. # Appears faster than adding to x_mlp separately y = F.linear(y, self.in_proj.weight, torch.cat((self.qkv_bias, self.mlp_bias))) else: y = self.in_proj(y) x_mlp, q, k, v = torch.split(y, self.in_split, dim=-1) # Dot product attention w/ qk norm q = self.q_norm(q.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2) k = self.k_norm(k.view(B, N, self.num_heads, self.head_dim)).transpose(1, 2) v = v.view(B, N, self.num_heads, self.head_dim).transpose(1, 2) if self.fused_attn: x_attn = F.scaled_dot_product_attention( q, k, v, dropout_p=self.attn_drop.p, ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x_attn = attn @ v x_attn = x_attn.transpose(1, 2).reshape(B, N, C) x_attn = self.attn_out_proj(x_attn) # MLP activation, dropout, fc2 x_mlp = self.mlp_act(x_mlp) x_mlp = self.mlp_drop(x_mlp) x_mlp = self.mlp_out_proj(x_mlp) # Add residual w/ drop path & layer scale applied y = self.drop_path(self.ls(x_attn + x_mlp)) x = x + y return x class ParallelThingsBlock(nn.Module): """ Parallel ViT block (N parallel attention followed by N parallel MLP) Based on: `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 """ def __init__( self, dim, num_heads, num_parallel=2, mlp_ratio=4., qkv_bias=False, qk_norm=False, init_values=None, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, ): super().__init__() self.num_parallel = num_parallel self.attns = nn.ModuleList() self.ffns = nn.ModuleList() for _ in range(num_parallel): self.attns.append(nn.Sequential(OrderedDict([ ('norm', norm_layer(dim)), ('attn', Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, )), ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), ('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity()) ]))) self.ffns.append(nn.Sequential(OrderedDict([ ('norm', norm_layer(dim)), ('mlp', mlp_layer( dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, )), ('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()), ('drop_path', DropPath(drop_path) if drop_path > 0. 
else nn.Identity())
            ])))

    def _forward_jit(self, x):
        x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0)
        x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0)
        return x

    @torch.jit.ignore
    def _forward(self, x):
        x = x + sum(attn(x) for attn in self.attns)
        x = x + sum(ffn(x) for ffn in self.ffns)
        return x

    def forward(self, x):
        if torch.jit.is_scripting() or torch.jit.is_tracing():
            return self._forward_jit(x)
        else:
            return self._forward(x)


class VisionTransformer(nn.Module):
    """ Vision Transformer

    A PyTorch impl of: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929
    """

    def __init__(
            self,
            img_size: Union[int, Tuple[int, int]] = 224,
            patch_size: Union[int, Tuple[int, int]] = 16,
            in_chans: int = 3,
            num_classes: int = 1000,
            global_pool: str = 'token',
            embed_dim: int = 768,
            depth: int = 12,
            num_heads: int = 12,
            mlp_ratio: float = 4.,
            qkv_bias: bool = True,
            qk_norm: bool = False,
            init_values: Optional[float] = None,
            class_token: bool = True,
            no_embed_class: bool = False,
            pre_norm: bool = False,
            fc_norm: Optional[bool] = None,
            drop_rate: float = 0.,
            pos_drop_rate: float = 0.,
            patch_drop_rate: float = 0.,
            proj_drop_rate: float = 0.,
            attn_drop_rate: float = 0.,
            drop_path_rate: float = 0.,
            weight_init: str = '',
            embed_layer: Callable = PatchEmbed,
            norm_layer: Optional[Callable] = None,
            act_layer: Optional[Callable] = None,
            block_fn: Callable = Block,
            mlp_layer: Callable = Mlp,
    ):
        """
        Args:
            img_size: Input image size.
            patch_size: Patch size.
            in_chans: Number of image input channels.
            num_classes: Number of classes for classification head.
            global_pool: Type of global pooling for final sequence (default: 'token').
            embed_dim: Transformer embedding dimension.
            depth: Depth of transformer.
            num_heads: Number of attention heads.
            mlp_ratio: Ratio of mlp hidden dim to embedding dim.
            qkv_bias: Enable bias for qkv projections if True.
            qk_norm: Enable normalization of query and key vectors if True.
            init_values: Layer-scale init values (layer-scale enabled if not None).
            class_token: Use class token.
            no_embed_class: Position embedding does not cover the prefix (class) tokens if True.
            pre_norm: Apply a norm layer to patch embeddings before the blocks if True (CLIP-style ViT).
            fc_norm: Pre head norm after pool (instead of before), if None, enabled when global_pool == 'avg'.
            drop_rate: Head dropout rate.
            pos_drop_rate: Position embedding dropout rate.
            patch_drop_rate: Patch (token) dropout rate.
            proj_drop_rate: Attention and MLP projection dropout rate.
            attn_drop_rate: Attention dropout rate.
            drop_path_rate: Stochastic depth rate.
            weight_init: Weight initialization scheme.
            embed_layer: Patch embedding layer.
            norm_layer: Normalization layer.
            act_layer: MLP activation layer.
            block_fn: Transformer block layer.
            mlp_layer: MLP block layer.
        """
        super().__init__()
        assert global_pool in ('', 'avg', 'token')
        assert class_token or global_pool != 'token'
        use_fc_norm = global_pool == 'avg' if fc_norm is None else fc_norm
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        act_layer = act_layer or nn.GELU

        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_prefix_tokens = 1 if class_token else 0
        self.no_embed_class = no_embed_class
        self.grad_checkpointing = False

        self.patch_embed = embed_layer(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            bias=not pre_norm,  # disable bias if pre-norm is used (e.g.
CLIP) ) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02) self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout( patch_drop_rate, num_prefix_tokens=self.num_prefix_tokens, ) else: self.patch_drop = nn.Identity() self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.Sequential(*[ block_fn( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, mlp_layer=mlp_layer, ) for i in range(depth)]) self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity() # Classifier Head self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() if weight_init != 'skip': self.init_weights(weight_init) def init_weights(self, mode=''): assert mode in ('jax', 'jax_nlhb', 'moco', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. trunc_normal_(self.pos_embed, std=.02) if self.cls_token is not None: nn.init.normal_(self.cls_token, std=1e-6) named_apply(get_init_weights_vit(mode, head_bias), self) def _init_weights(self, m): # this fn left here for compat with downstream users init_weights_vit_timm(m) @torch.jit.ignore() def load_pretrained(self, checkpoint_path, prefix=''): _load_weights(self, checkpoint_path, prefix) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token', 'dist_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes: int, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'token') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def _pos_embed(self, x): if self.no_embed_class: # deit-3, updated JAX (big vision) # position embedding does not overlap with class token, add then concat x = x + self.pos_embed if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) else: # original timm, JAX, and deit vit impl # pos_embed has entry for class token, concat then add if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) x = x + self.pos_embed return self.pos_drop(x) def _intermediate_layers( self, x: torch.Tensor, n: Union[int, Sequence] = 1, ): outputs, num_blocks = [], len(self.blocks) take_indices = set(range(num_blocks - n, num_blocks) if isinstance(n, int) else n) # forward pass x = self.patch_embed(x) x = self._pos_embed(x) x = self.patch_drop(x) x = self.norm_pre(x) for i, blk in enumerate(self.blocks): x = blk(x) if i in take_indices: outputs.append(x) return outputs def 
get_intermediate_layers(
            self,
            x: torch.Tensor,
            n: Union[int, Sequence] = 1,
            reshape: bool = False,
            return_class_token: bool = False,
            norm: bool = False,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
        """ Intermediate layer accessor (NOTE: This is a WIP experiment).
        Inspired by DINO / DINOv2 interface
        """
        # take last n blocks if n is an int, if n is a sequence, select by matching indices
        outputs = self._intermediate_layers(x, n)
        if norm:
            outputs = [self.norm(out) for out in outputs]
        class_tokens = [out[:, 0:self.num_prefix_tokens] for out in outputs]
        outputs = [out[:, self.num_prefix_tokens:] for out in outputs]

        if reshape:
            grid_size = self.patch_embed.grid_size
            outputs = [
                out.reshape(x.shape[0], grid_size[0], grid_size[1], -1).permute(0, 3, 1, 2).contiguous()
                for out in outputs
            ]

        if return_class_token:
            return tuple(zip(outputs, class_tokens))
        return tuple(outputs)

    def forward_features(self, x):
        x = self.patch_embed(x)
        x = self._pos_embed(x)
        x = self.patch_drop(x)
        x = self.norm_pre(x)
        if self.grad_checkpointing and not torch.jit.is_scripting():
            x = checkpoint_seq(self.blocks, x)
        else:
            x = self.blocks(x)
        x = self.norm(x)
        return x

    def forward_head(self, x, pre_logits: bool = False):
        if self.global_pool:
            x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
        x = self.fc_norm(x)
        x = self.head_drop(x)
        return x if pre_logits else self.head(x)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.forward_head(x)
        return x


def init_weights_vit_timm(module: nn.Module, name: str = ''):
    """ ViT weight initialization, original timm impl (for reproducibility) """
    if isinstance(module, nn.Linear):
        trunc_normal_(module.weight, std=.02)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif hasattr(module, 'init_weights'):
        module.init_weights()


def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.):
    """ ViT weight initialization, matching JAX (Flax) impl """
    if isinstance(module, nn.Linear):
        if name.startswith('head'):
            nn.init.zeros_(module.weight)
            nn.init.constant_(module.bias, head_bias)
        else:
            nn.init.xavier_uniform_(module.weight)
            if module.bias is not None:
                nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias)
    elif isinstance(module, nn.Conv2d):
        lecun_normal_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif hasattr(module, 'init_weights'):
        module.init_weights()


def init_weights_vit_moco(module: nn.Module, name: str = ''):
    """ ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """
    if isinstance(module, nn.Linear):
        if 'qkv' in name:
            # treat the weights of Q, K, V separately
            val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
            nn.init.uniform_(module.weight, -val, val)
        else:
            nn.init.xavier_uniform_(module.weight)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    elif hasattr(module, 'init_weights'):
        module.init_weights()


def get_init_weights_vit(mode='jax', head_bias: float = 0.):
    if 'jax' in mode:
        return partial(init_weights_vit_jax, head_bias=head_bias)
    elif 'moco' in mode:
        return init_weights_vit_moco
    else:
        return init_weights_vit_timm


def resize_pos_embed(
        posemb,
        posemb_new,
        num_prefix_tokens=1,
        gs_new=(),
        interpolation='bicubic',
        antialias=False,
):
    """ Rescale the grid of position embeddings when loading from state_dict.
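    Example (editor's note, illustrative): a 224px ViT-B/16 pos_embed of shape (1, 197, 768),
    i.e. one class token plus a 14x14 patch grid, is resized to (1, 577, 768) for the
    24x24 grid of a 384px input.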
*DEPRECATED* This function is being deprecated in favour of resample_abs_pos_embed Adapted from: https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224 """ ntok_new = posemb_new.shape[1] if num_prefix_tokens: posemb_prefix, posemb_grid = posemb[:, :num_prefix_tokens], posemb[0, num_prefix_tokens:] ntok_new -= num_prefix_tokens else: posemb_prefix, posemb_grid = posemb[:, :0], posemb[0] gs_old = int(math.sqrt(len(posemb_grid))) if not len(gs_new): # backwards compatibility gs_new = [int(math.sqrt(ntok_new))] * 2 assert len(gs_new) >= 2 _logger.info(f'Resized position embedding: {posemb.shape} ({[gs_old, gs_old]}) to {posemb_new.shape} ({gs_new}).') posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2) posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode=interpolation, antialias=antialias, align_corners=False) posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1) posemb = torch.cat([posemb_prefix, posemb_grid], dim=1) return posemb @torch.no_grad() def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): """ Load weights from .npz checkpoints for official Google Brain Flax implementation """ import numpy as np def _n2p(w, t=True): if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: w = w.flatten() if t: if w.ndim == 4: w = w.transpose([3, 2, 0, 1]) elif w.ndim == 3: w = w.transpose([2, 0, 1]) elif w.ndim == 2: w = w.transpose([1, 0]) return torch.from_numpy(w) w = np.load(checkpoint_path) interpolation = 'bilinear' antialias = False big_vision = False if not prefix: if 'opt/target/embedding/kernel' in w: prefix = 'opt/target/' elif 'params/embedding/kernel' in w: prefix = 'params/' big_vision = True if hasattr(model.patch_embed, 'backbone'): # hybrid backbone = model.patch_embed.backbone stem_only = not hasattr(backbone, 'stem') stem = backbone if stem_only else backbone.stem stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) if not stem_only: for i, stage in enumerate(backbone.stages): for j, block in enumerate(stage.blocks): bp = f'{prefix}block{i + 1}/unit{j + 1}/' for r in range(3): getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) if block.downsample is not None: block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) else: embed_conv_w = adapt_input_conv( model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) if embed_conv_w.shape[-2:] != model.patch_embed.proj.weight.shape[-2:]: embed_conv_w = resample_patch_embed( embed_conv_w, model.patch_embed.proj.weight.shape[-2:], interpolation=interpolation, antialias=antialias, verbose=True, ) model.patch_embed.proj.weight.copy_(embed_conv_w) model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) if model.cls_token is not None: model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) if big_vision: pos_embed_w = _n2p(w[f'{prefix}pos_embedding'], t=False) else: pos_embed_w = 
_n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) if pos_embed_w.shape != model.pos_embed.shape: old_shape = pos_embed_w.shape num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) pos_embed_w = resample_abs_pos_embed( # resize pos embedding when different size from pretrained weights pos_embed_w, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) model.pos_embed.copy_(pos_embed_w) model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) # NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights # if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: # model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) # model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) mha_sub, b_sub, ln1_sub = (0, 0, 1) if big_vision else (1, 3, 2) for i, block in enumerate(model.blocks.children()): block_prefix = f'{prefix}Transformer/encoderblock_{i}/' mha_prefix = block_prefix + f'MultiHeadDotProductAttention_{mha_sub}/' block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) block.attn.qkv.weight.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) block.attn.qkv.bias.copy_(torch.cat([ _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) for r in range(2): getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/kernel'])) getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_{b_sub}/Dense_{r}/bias'])) block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/scale'])) block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_{ln1_sub}/bias'])) def _convert_openai_clip(state_dict, model): out_dict = {} swaps = [ ('visual.', ''), ('conv1', 'patch_embed.proj'), ('positional_embedding', 'pos_embed'), ('transformer.resblocks.', 'blocks.'), ('ln_pre', 'norm_pre'), ('ln_post', 'norm'), ('ln_', 'norm'), ('in_proj_', 'qkv.'), ('out_proj', 'proj'), ('mlp.c_fc', 'mlp.fc1'), ('mlp.c_proj', 'mlp.fc2'), ] for k, v in state_dict.items(): if not k.startswith('visual.'): continue for sp in swaps: k = k.replace(sp[0], sp[1]) if k == 'proj': k = 'head.weight' v = v.transpose(0, 1) out_dict['head.bias'] = torch.zeros(v.shape[0]) elif k == 'class_embedding': k = 'cls_token' v = v.unsqueeze(0).unsqueeze(1) elif k == 'pos_embed': v = v.unsqueeze(0) if v.shape[1] != model.pos_embed.shape[1]: # To resize pos embedding when using model at different size from pretrained weights v = resize_pos_embed( v, model.pos_embed, 0 if getattr(model, 'no_embed_class') else getattr(model, 'num_prefix_tokens', 1), model.patch_embed.grid_size ) out_dict[k] = v return out_dict def _convert_dinov2(state_dict, model): import re out_dict = {} for k, v in state_dict.items(): if k == "mask_token": continue 
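        # NOTE (editor's addition): DINOv2 checkpoints built with SwiGLU MLPs pack the gated
        # fc weights as 'w12' and the output projection as 'w3'; the remaps below rename them
        # to timm's packed SwiGLU fc1/fc2 convention, e.g.
        # 'blocks.0.mlp.w12.weight' -> 'blocks.0.mlp.fc1.weight'.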
elif re.match(r"blocks\.(\d+)\.mlp\.w12\.(?:weight|bias)", k): out_dict[k.replace("w12", "fc1")] = v continue elif re.match(r"blocks\.(\d+)\.mlp\.w3\.(?:weight|bias)", k): out_dict[k.replace("w3", "fc2")] = v continue out_dict[k] = v return out_dict def _convert_ijepa(state_dict, model): out_dict = {} for k, v in state_dict['encoder'].items(): if k.startswith('module.'): k = k[7:] if k.startswith('norm.'): k = 'fc_norm.' + k[5:] out_dict[k] = v return out_dict def checkpoint_filter_fn( state_dict, model, adapt_layer_scale=False, interpolation='bicubic', antialias=True, ): """ convert patch embedding weight from manual patchify + linear proj to conv""" import re out_dict = {} state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) if 'visual.class_embedding' in state_dict: return _convert_openai_clip(state_dict, model) if "mask_token" in state_dict: state_dict = _convert_dinov2(state_dict, model) if "encoder" in state_dict: state_dict = _convert_ijepa(state_dict, model) for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k: O, I, H, W = model.patch_embed.proj.weight.shape if len(v.shape) < 4: # For old models that I trained prior to conv based patchification O, I, H, W = model.patch_embed.proj.weight.shape v = v.reshape(O, -1, H, W) if v.shape[-1] != W or v.shape[-2] != H: v = resample_patch_embed( v, (H, W), interpolation=interpolation, antialias=antialias, verbose=True, ) elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: # To resize pos embedding when using model at different size from pretrained weights num_prefix_tokens = 0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1) v = resample_abs_pos_embed( v, new_size=model.patch_embed.grid_size, num_prefix_tokens=num_prefix_tokens, interpolation=interpolation, antialias=antialias, verbose=True, ) elif adapt_layer_scale and 'gamma_' in k: # remap layer-scale gamma into sub-module (deit3 models) k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k) elif 'pre_logits' in k: # NOTE representation layer removed as not used in latest 21k/1k pretrained weights continue out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # re-finetuned augreg 21k FT on in1k weights 'vit_base_patch16_224.augreg2_in21k_ft_in1k': _cfg( hf_hub_id='timm/'), 'vit_base_patch16_384.augreg2_in21k_ft_in1k': _cfg(), 'vit_base_patch8_224.augreg2_in21k_ft_in1k': _cfg( hf_hub_id='timm/'), # How to train your ViT (augreg) weights, pretrained on 21k FT on in1k 'vit_tiny_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_tiny_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_small_patch32_224.augreg_in21k_ft_in1k': _cfg( 
url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_small_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch32_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch8_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_large_patch16_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_large_patch16_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), # patch models (weights from official Google JAX impl) pretrained on in21k FT on in1k 'vit_base_patch16_224.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', hf_hub_id='timm/'), 'vit_base_patch16_384.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_384-83fb41ba.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_patch32_384.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p32_384-9b920ba8.pth', 
hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), # How to train your ViT (augreg) weights trained on in1k only 'vit_small_patch16_224.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_small_patch16_384.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch32_224.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch32_384.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i1k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_base_patch16_224.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True), 'vit_base_patch16_384.augreg_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i1k-300ep-lr_0.001-aug_strong2-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', custom_load=True, input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_patch14_224.untrained': _cfg(url=''), 'vit_huge_patch14_224.untrained': _cfg(url=''), 'vit_giant_patch14_224.untrained': _cfg(url=''), 'vit_gigantic_patch14_224.untrained': _cfg(url=''), # patch models, imagenet21k (weights from official Google JAX impl) 'vit_large_patch32_224.orig_in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_patch32_224_in21k-9046d2e7.pth', hf_hub_id='timm/', num_classes=21843), 'vit_huge_patch14_224.orig_in21k': _cfg( url='https://storage.googleapis.com/vit_models/imagenet21k/ViT-H_14.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), # How to train your ViT (augreg) weights, pretrained on in21k 'vit_tiny_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_small_patch32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_32-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_small_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_base_patch8_224.augreg_in21k': _cfg( 
url='https://storage.googleapis.com/vit_models/augreg/B_8-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), 'vit_large_patch16_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1.npz', hf_hub_id='timm/', custom_load=True, num_classes=21843), # SAM trained models (https://arxiv.org/abs/2106.01548) 'vit_base_patch32_224.sam_in1k': _cfg( url='https://storage.googleapis.com/vit_models/sam/ViT-B_32.npz', custom_load=True, hf_hub_id='timm/'), 'vit_base_patch16_224.sam_in1k': _cfg( url='https://storage.googleapis.com/vit_models/sam/ViT-B_16.npz', custom_load=True, hf_hub_id='timm/'), # DINO pretrained - https://arxiv.org/abs/2104.14294 (no classifier head, for fine-tune only) 'vit_small_patch16_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_small_patch8_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch16_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_base_patch8_224.dino': _cfg( url='https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth', hf_hub_id='timm/', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), # DINOv2 pretrained - https://arxiv.org/abs/2304.07193 (no classifier head, for fine-tune/features only) 'vit_small_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_base_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_large_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), 'vit_giant_patch14_dinov2.lvd142m': _cfg( url='https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 518, 518), crop_pct=1.0), # ViT ImageNet-21K-P pretraining by MILL 'vit_base_patch16_224_miil.in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_in21k_miil-887286df.pth', hf_hub_id='timm/', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear', num_classes=11221), 'vit_base_patch16_224_miil.in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/vit_base_patch16_224_1k_miil_84_4-2deb18e3.pth', hf_hub_id='timm/', mean=(0., 0., 0.), std=(1., 1., 1.), crop_pct=0.875, interpolation='bilinear'), # 
Custom timm variants 'vit_base_patch16_rpn_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_base_patch16_rpn_224-sw-3b07e89d.pth', hf_hub_id='timm/'), 'vit_medium_patch16_gap_240.sw_in12k': _cfg( hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=11821), 'vit_medium_patch16_gap_256.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95), 'vit_medium_patch16_gap_384.sw_in12k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=0.95, crop_mode='squash'), 'vit_base_patch16_gap_224': _cfg(), # CLIP pretrained image tower and related fine-tuned weights 'vit_base_patch32_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch32_clip_384.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384)), 'vit_base_patch32_clip_448.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 448, 448)), 'vit_base_patch16_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), 'vit_base_patch16_clip_384.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_huge_patch14_clip_224.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_huge_patch14_clip_336.laion2b_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.openai_ft_in12k_in1k': _cfg( # hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k_in1k', # FIXME weight exists, need to push mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch32_clip_384.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), 'vit_base_patch16_clip_224.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95), 'vit_base_patch16_clip_384.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=0.95, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.openai_ft_in12k_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_base_patch16_clip_384.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 
'vit_large_patch14_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0), 'vit_large_patch14_clip_336.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_huge_patch14_clip_224.laion2b_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_huge_patch14_clip_336.laion2b_ft_in1k': _cfg( hf_hub_id='', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), crop_mode='squash'), 'vit_base_patch32_clip_224.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_224.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD), 'vit_base_patch16_clip_384.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 384, 384), crop_mode='squash'), 'vit_large_patch14_clip_224.openai_ft_in1k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0), 'vit_base_patch32_clip_224.laion2b_ft_in12k': _cfg( #hf_hub_id='timm/vit_base_patch32_clip_224.laion2b_ft_in12k', # FIXME weight exists, need to push mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_base_patch16_clip_224.laion2b_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_large_patch14_clip_224.laion2b_ft_in12k': _cfg( hf_hub_id='timm/', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=11821), 'vit_huge_patch14_clip_224.laion2b_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), 'vit_base_patch32_clip_224.openai_ft_in12k': _cfg( # hf_hub_id='timm/vit_base_patch32_clip_224.openai_ft_in12k', # FIXME weight exists, need to push mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_base_patch16_clip_224.openai_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=11821), 'vit_large_patch14_clip_224.openai_ft_in12k': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=11821), 'vit_base_patch32_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-B-32-laion2B-s34B-b79K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_base_patch16_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-B-16-laion2B-s34B-b88K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_base_patch16_clip_224.datacompxl': _cfg( hf_hub_id='laion/CLIP-ViT-B-16-DataComp.XL-s13B-b90K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=512), 'vit_large_patch14_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-L-14-laion2B-s32B-b82K', hf_hub_filename='open_clip_pytorch_model.bin', mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, crop_pct=1.0, num_classes=768), 'vit_large_patch14_clip_224.datacompxl': _cfg( hf_hub_id='laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_huge_patch14_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-H-14-laion2B-s32B-b79K', 
hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_giant_patch14_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-g-14-laion2B-s12B-b42K', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1024), 'vit_gigantic_patch14_clip_224.laion2b': _cfg( hf_hub_id='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=1280), 'vit_base_patch32_clip_224.openai': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_base_patch16_clip_224.openai': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, num_classes=512), 'vit_large_patch14_clip_224.openai': _cfg( hf_hub_id='timm/', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, num_classes=768), 'vit_large_patch14_clip_336.openai': _cfg( hf_hub_id='timm/', hf_hub_filename='open_clip_pytorch_model.bin', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, crop_pct=1.0, input_size=(3, 336, 336), num_classes=768), # experimental (may be removed) 'vit_base_patch32_plus_256.untrained': _cfg(url='', input_size=(3, 256, 256), crop_pct=0.95), 'vit_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240), crop_pct=0.95), 'vit_small_patch16_36x1_224.untrained': _cfg(url=''), 'vit_small_patch16_18x2_224.untrained': _cfg(url=''), 'vit_base_patch16_18x2_224.untrained': _cfg(url=''), # EVA fine-tuned weights from MAE style MIM - EVA-CLIP target pretrain # https://github.com/baaivision/EVA/blob/7ecf2c0a370d97967e86d047d7af9188f78d2df3/eva/README.md#eva-l-learning-better-mim-representations-from-eva-clip 'eva_large_patch14_196.in22k_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_21k_to_1k_ft_88p6.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 196, 196), crop_pct=1.0), 'eva_large_patch14_336.in22k_ft_in22k_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_21k_to_1k_ft_89p2.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'eva_large_patch14_196.in22k_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_1k_ft_88p0.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 196, 196), crop_pct=1.0), 'eva_large_patch14_336.in22k_ft_in1k': _cfg( # hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_1k_ft_88p65.pt', hf_hub_id='timm/', license='mit', mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD, input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'), 'flexivit_small.1200ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_small.600ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_small.300ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_s_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.1200ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), 
crop_pct=0.95), 'flexivit_base.600ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.300ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.1000ep_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_1000ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_base.300ep_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_b_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_large.1200ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_large.600ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_600ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_large.300ep_in1k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/flexivit_l_i1k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95), 'flexivit_base.patch16_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/vit_b16_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'flexivit_base.patch30_in21k': _cfg( url='https://storage.googleapis.com/big_vision/flexivit/vit_b30_i21k_300ep.npz', custom_load=True, hf_hub_id='timm/', input_size=(3, 240, 240), crop_pct=0.95, num_classes=21843), 'vit_base_patch16_xp_224.untrained': _cfg(url=''), 'vit_large_patch14_xp_224.untrained': _cfg(url=''), 'vit_huge_patch14_xp_224.untrained': _cfg(url=''), 'vit_base_patch16_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_large_patch16_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_large.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_224.mae': _cfg( url='https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_huge.pth', hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_224_ijepa.in1k': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.14-300e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch14_224_ijepa.in22k': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.h.14-900e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_huge_patch16_448_ijepa.in1k': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN1K-vit.h.16-448px-300e.pth.tar', # hf_hub_id='timm/', license='cc-by-nc-4.0', input_size=(3, 448, 448), crop_pct=1.0, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), 'vit_gigantic_patch16_224_ijepa.in22k': _cfg( url='https://dl.fbaipublicfiles.com/ijepa/IN22K-vit.g.16-600e.pth.tar', # 
hf_hub_id='timm/', license='cc-by-nc-4.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0), }) def _create_vision_transformer(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') if 'flexi' in variant: # FIXME Google FlexiViT pretrained models have a strong preference for bilinear patch / embed # interpolation, other pretrained models resize better w/ anti-aliased bicubic interpolation. _filter_fn = partial(checkpoint_filter_fn, interpolation='bilinear', antialias=False) else: _filter_fn = checkpoint_filter_fn return build_model_with_cfg( VisionTransformer, variant, pretrained, pretrained_filter_fn=_filter_fn, **kwargs, ) @register_model def vit_tiny_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Tiny (Vit-Ti/16) """ model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_tiny_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Tiny (Vit-Ti/16) @ 384x384. """ model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch32_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/32) """ model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch32_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/32) at 384x384. """ model_args = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/16) """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/16) """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch8_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Small (ViT-S/8) """ model_args = dict(patch_size=8, embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer('vit_small_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer. 
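    Example (editor's note, illustrative): timm.create_model('vit_base_patch32_224', pretrained=True)
    builds this variant and selects its default pretrained weights from default_cfgs.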
""" model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch8_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch32_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. """ model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch32_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. 
""" model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch16_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) """ model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer('vit_large_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). """ model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16) model = _create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giant_patch14_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 """ model_args = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16) model = _create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_gigantic_patch14_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Gigantic (big-G) model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 """ model_args = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16) model = _create_vision_transformer( 'vit_gigantic_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_224_miil(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). 
Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False) model = _create_vision_transformer( 'vit_base_patch16_224_miil', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_gap_240(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 240x240 """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False, global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) model = _create_vision_transformer( 'vit_medium_patch16_gap_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_gap_256(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 256x256 """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False, global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) model = _create_vision_transformer( 'vit_medium_patch16_gap_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_medium_patch16_gap_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Medium (ViT-M/16) w/o class token, w/ avg-pool @ 384x384 """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, class_token=False, global_pool='avg', qkv_bias=False, init_values=1e-6, fc_norm=False) model = _create_vision_transformer( 'vit_medium_patch16_gap_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_gap_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16) w/o class token, w/ avg-pool @ 256x256 """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=16, class_token=False, global_pool='avg', fc_norm=False) model = _create_vision_transformer( 'vit_base_patch16_gap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_clip_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-B/32 CLIP image tower @ 224x224 """ model_args = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_base_patch32_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_clip_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-B/32 CLIP image tower @ 384x384 """ model_args = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_base_patch32_clip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch32_clip_448(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-B/32 CLIP image tower @ 448x448 """ model_args = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_base_patch32_clip_448', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_clip_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-B/16 CLIP image tower """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_base_patch16_clip_224', 
pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_clip_384(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-B/16 CLIP image tower @ 384x384 """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_base_patch16_clip_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_clip_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) CLIP image tower """ model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_large_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_clip_336(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) CLIP image tower @ 336x336 """ model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_large_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_clip_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) CLIP image tower. """ model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_huge_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_clip_336(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) CLIP image tower @ 336x336 """ model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_huge_patch14_clip_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giant_patch14_clip_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 Pretrained weights from CLIP image tower. """ model_args = dict( patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_giant_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_gigantic_patch14_clip_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-bigG model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560 Pretrained weights from CLIP image tower. 
""" model_args = dict( patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, pre_norm=True, norm_layer=nn.LayerNorm) model = _create_vision_transformer( 'vit_gigantic_patch14_clip_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model # Experimental models below @register_model def vit_base_patch32_plus_256(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/32+) """ model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, init_values=1e-5) model = _create_vision_transformer( 'vit_base_patch32_plus_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_plus_240(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16+) """ model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14, init_values=1e-5) model = _create_vision_transformer( 'vit_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base (ViT-B/16) w/ residual post-norm """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, init_values=1e-5, class_token=False, block_fn=ResPostBlock, global_pool='avg') model = _create_vision_transformer( 'vit_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_36x1_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base w/ LayerScale + 36 x 1 (36 block serial) config. Experimental, may remove. Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow. """ model_args = dict(patch_size=16, embed_dim=384, depth=36, num_heads=6, init_values=1e-5) model = _create_vision_transformer( 'vit_small_patch16_36x1_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch16_18x2_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Small w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove. Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 Paper focuses on 24x2 + 48x1 for 'Small' width but those are extremely slow. """ model_args = dict( patch_size=16, embed_dim=384, depth=18, num_heads=6, init_values=1e-5, block_fn=ParallelThingsBlock) model = _create_vision_transformer( 'vit_small_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_18x2_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Base w/ LayerScale + 18 x 2 (36 block parallel) config. Experimental, may remove. 
Based on `Three things everyone should know about Vision Transformers` - https://arxiv.org/abs/2203.09795 """ model_args = dict( patch_size=16, embed_dim=768, depth=18, num_heads=12, init_values=1e-5, block_fn=ParallelThingsBlock) model = _create_vision_transformer( 'vit_base_patch16_18x2_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_large_patch14_196(pretrained=False, **kwargs) -> VisionTransformer: """ EVA-large model https://arxiv.org/abs/2211.07636 /via MAE MIM pretrain""" model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg') model = _create_vision_transformer( 'eva_large_patch14_196', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def eva_large_patch14_336(pretrained=False, **kwargs) -> VisionTransformer: """ EVA-large model https://arxiv.org/abs/2211.07636 via MAE MIM pretrain""" model_args = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg') model = _create_vision_transformer('eva_large_patch14_336', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def flexivit_small(pretrained=False, **kwargs) -> VisionTransformer: """ FlexiViT-Small """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True) model = _create_vision_transformer('flexivit_small', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def flexivit_base(pretrained=False, **kwargs) -> VisionTransformer: """ FlexiViT-Base """ model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True) model = _create_vision_transformer('flexivit_base', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def flexivit_large(pretrained=False, **kwargs) -> VisionTransformer: """ FlexiViT-Large """ model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True) model = _create_vision_transformer('flexivit_large', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch16_xp_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) w/ parallel blocks and qk norm enabled. """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, pre_norm=True, no_embed_class=True, norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True, ) model = _create_vision_transformer( 'vit_base_patch16_xp_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_xp_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Large model (ViT-L/14) w/ parallel blocks and qk norm enabled. """ model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, pre_norm=True, no_embed_class=True, norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True, ) model = _create_vision_transformer( 'vit_large_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_xp_224(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) w/ parallel blocks and qk norm enabled. 
""" model_args = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, pre_norm=True, no_embed_class=True, norm_layer=RmsNorm, block_fn=ParallelScalingBlock, qkv_bias=False, qk_norm=True, ) model = _create_vision_transformer( 'vit_huge_patch14_xp_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-S/14 for DINOv2 """ model_args = dict( patch_size=14, embed_dim=384, depth=12, num_heads=6, init_values=1e-5, img_size=518, ) model = _create_vision_transformer( 'vit_small_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-B/14 for DINOv2 """ model_args = dict( patch_size=14, embed_dim=768, depth=12, num_heads=12, init_values=1e-5, img_size=518, ) model = _create_vision_transformer( 'vit_base_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-L/14 for DINOv2 """ model_args = dict( patch_size=14, embed_dim=1024, depth=24, num_heads=16, init_values=1e-5, img_size=518, ) model = _create_vision_transformer( 'vit_large_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_giant_patch14_dinov2(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-G/14 for DINOv2 """ # The hidden_features of SwiGLU is calculated by: # hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8 # When embed_dim=1536, hidden_features=4096 # With SwiGLUPacked, we need to set hidden_features = 2 * 4096 = 8192 model_args = dict( patch_size=14, embed_dim=1536, depth=40, num_heads=24, init_values=1e-5, mlp_ratio=2.66667 * 2, mlp_layer=SwiGLUPacked, img_size=518, act_layer=nn.SiLU ) model = _create_vision_transformer( 'vit_giant_patch14_dinov2', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch14_224_ijepa(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/14) from `I-JEPA` - https://arxiv.org/abs/2301.08243 """ model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg') model = _create_vision_transformer( 'vit_huge_patch14_224_ijepa', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_huge_patch16_448_ijepa(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Huge model (ViT-H/16) from `I-JEPA` - https://arxiv.org/abs/2301.08243 """ model_args = dict( patch_size=16, embed_dim=1280, depth=32, num_heads=16, class_token=False, global_pool='avg', img_size=448) model = _create_vision_transformer( 'vit_huge_patch16_448_ijepa', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_gigantic_patch16_224_ijepa(pretrained=False, **kwargs) -> VisionTransformer: """ ViT-Gigantic (big-G) model (ViT-G/16) from `I-JEPA - https://arxiv.org/abs/2301.08243 """ model_args = dict(patch_size=16, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16) model = _create_vision_transformer( 'vit_gigantic_patch16_224_ijepa', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'vit_tiny_patch16_224_in21k': 'vit_tiny_patch16_224.augreg_in21k', 'vit_small_patch32_224_in21k': 'vit_small_patch32_224.augreg_in21k', 'vit_small_patch16_224_in21k': 
'vit_small_patch16_224.augreg_in21k', 'vit_base_patch32_224_in21k': 'vit_base_patch32_224.augreg_in21k', 'vit_base_patch16_224_in21k': 'vit_base_patch16_224.augreg_in21k', 'vit_base_patch8_224_in21k': 'vit_base_patch8_224.augreg_in21k', 'vit_large_patch32_224_in21k': 'vit_large_patch32_224.orig_in21k', 'vit_large_patch16_224_in21k': 'vit_large_patch16_224.augreg_in21k', 'vit_huge_patch14_224_in21k': 'vit_huge_patch14_224.orig_in21k', 'vit_base_patch32_224_sam': 'vit_base_patch32_224.sam', 'vit_base_patch16_224_sam': 'vit_base_patch16_224.sam', 'vit_small_patch16_224_dino': 'vit_small_patch16_224.dino', 'vit_small_patch8_224_dino': 'vit_small_patch8_224.dino', 'vit_base_patch16_224_dino': 'vit_base_patch16_224.dino', 'vit_base_patch8_224_dino': 'vit_base_patch8_224.dino', 'vit_base_patch16_224_miil_in21k': 'vit_base_patch16_224_miil.in21k', 'vit_base_patch32_224_clip_laion2b': 'vit_base_patch32_clip_224.laion2b', 'vit_large_patch14_224_clip_laion2b': 'vit_large_patch14_clip_224.laion2b', 'vit_huge_patch14_224_clip_laion2b': 'vit_huge_patch14_clip_224.laion2b', 'vit_giant_patch14_224_clip_laion2b': 'vit_giant_patch14_clip_224.laion2b', })
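# --- Usage sketch (appended commentary, not part of the original file) ----------------
# A minimal, hedged example of how the entrypoints registered above are typically
# consumed through timm.create_model(). The model name and constructor kwargs come from
# the registrations in this file; batch size and the 10-class head are illustrative
# assumptions for the example only.
import torch
import timm

# Build an untrained ViT-B/16 with a fresh 10-class head; `num_classes` is forwarded to
# the VisionTransformer constructor through **kwargs of the registered entrypoint.
model = timm.create_model('vit_base_patch16_224', pretrained=False, num_classes=10)
model.eval()

x = torch.randn(2, 3, 224, 224)            # dummy batch matching the default_cfg input_size
with torch.no_grad():
    logits = model(x)                       # classifier output, shape (2, 10)
    feats = model.forward_features(x)       # pre-pooling tokens, shape (2, 197, 768): 196 patches + cls

print(logits.shape, feats.shape)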
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/vision_transformer_hybrid.py
""" Hybrid Vision Transformer (ViT) in PyTorch A PyTorch implement of the Hybrid Vision Transformers as described in: 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 `How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` - https://arxiv.org/abs/2106.10270 NOTE These hybrid model definitions depend on code in vision_transformer.py. They were moved here to keep file sizes sane. Hacked together by / Copyright 2020, Ross Wightman """ from functools import partial from typing import List, Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import StdConv2dSame, StdConv2d, to_2tuple from ._registry import generate_default_cfgs, register_model, register_model_deprecations from .resnet import resnet26d, resnet50d from .resnetv2 import ResNetV2, create_resnetv2_stem from .vision_transformer import _create_vision_transformer, VisionTransformer class HybridEmbed(nn.Module): """ CNN Feature Map Embedding Extract feature map from CNN, flatten, project to embedding dim. """ def __init__( self, backbone, img_size=224, patch_size=1, feature_size=None, in_chans=3, embed_dim=768, bias=True, ): super().__init__() assert isinstance(backbone, nn.Module) img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.backbone = backbone if feature_size is None: with torch.no_grad(): # NOTE Most reliable way of determining output dims is to run forward pass training = backbone.training if training: backbone.eval() o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1])) if isinstance(o, (list, tuple)): o = o[-1] # last feature if backbone outputs list/tuple of features feature_size = o.shape[-2:] feature_dim = o.shape[1] backbone.train(training) else: feature_size = to_2tuple(feature_size) if hasattr(self.backbone, 'feature_info'): feature_dim = self.backbone.feature_info.channels()[-1] else: feature_dim = self.backbone.num_features assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0 self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) def forward(self, x): x = self.backbone(x) if isinstance(x, (list, tuple)): x = x[-1] # last feature if backbone outputs list/tuple of features x = self.proj(x) x = x.flatten(2).transpose(1, 2) return x class HybridEmbedWithSize(nn.Module): """ CNN Feature Map Embedding Extract feature map from CNN, flatten, project to embedding dim. 
""" def __init__( self, backbone, img_size=224, patch_size=1, feature_size=None, in_chans=3, embed_dim=768, bias=True, ): super().__init__( backbone=backbone, img_size=img_size, patch_size=patch_size, feature_size=feature_size, in_chans=in_chans, embed_dim=embed_dim, bias=bias, ) def forward(self, x) -> Tuple[torch.Tensor, List[int]]: x = self.backbone(x) if isinstance(x, (list, tuple)): x = x[-1] # last feature if backbone outputs list/tuple of features x = self.proj(x) return x.flatten(2).transpose(1, 2), x.shape[-2:] def _create_vision_transformer_hybrid(variant, backbone, pretrained=False, **kwargs): embed_layer = partial(HybridEmbed, backbone=backbone) kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set return _create_vision_transformer(variant, pretrained=pretrained, embed_layer=embed_layer, **kwargs) def _resnetv2(layers=(3, 4, 9), **kwargs): """ ResNet-V2 backbone helper""" padding_same = kwargs.get('padding_same', True) stem_type = 'same' if padding_same else '' conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8) if len(layers): backbone = ResNetV2( layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), preact=False, stem_type=stem_type, conv_layer=conv_layer) else: backbone = create_resnetv2_stem( kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer) return backbone def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # hybrid in-1k models (weights from official JAX impl where they exist) 'vit_tiny_r_s16_p8_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True, first_conv='patch_embed.backbone.conv'), 'vit_tiny_r_s16_p8_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), 'vit_small_r26_s32_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True, ), 'vit_small_r26_s32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), 'vit_base_r26_s32_224.untrained': _cfg(), 'vit_base_r50_s16_384.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_r50_s32_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True, ), 
'vit_large_r50_s32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True, ), # hybrid in-21k models (weights from official Google JAX impl where they exist) 'vit_tiny_r_s16_p8_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv', custom_load=True), 'vit_small_r26_s32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, custom_load=True), 'vit_base_r50_s16_224.orig_in21k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9), 'vit_large_r50_s32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, custom_load=True), # hybrid models (using timm resnet backbones) 'vit_small_resnet26d_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_small_resnet50d_s16_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_resnet26d_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_resnet50d_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), }) @register_model def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs) -> VisionTransformer: """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224. """ backbone = _resnetv2(layers=(), **kwargs) model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer_hybrid( 'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs) -> VisionTransformer: """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384. """ backbone = _resnetv2(layers=(), **kwargs) model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer_hybrid( 'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: """ R26+ViT-S/S32 hybrid. """ backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer_hybrid( 'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_r26_s32_384(pretrained=False, **kwargs) -> VisionTransformer: """ R26+ViT-S/S32 hybrid. 
""" backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer_hybrid( 'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: """ R26+ViT-B/S32 hybrid. """ backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r50_s16_224(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929). """ backbone = _resnetv2((3, 4, 9), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r50_s16_384(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ backbone = _resnetv2((3, 4, 9), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_r50_s32_224(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-L/S32 hybrid. """ backbone = _resnetv2((3, 4, 6, 3), **kwargs) model_args = dict(embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer_hybrid( 'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_r50_s32_384(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-L/S32 hybrid. """ backbone = _resnetv2((3, 4, 6, 3), **kwargs) model_args = dict(embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer_hybrid( 'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. """ backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) model = _create_vision_transformer_hybrid( 'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_resnet50d_s16_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. """ backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) model = _create_vision_transformer_hybrid( 'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT base hybrid w/ ResNet26D stride 32. 
No pretrained weights. """ backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_resnet50d_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. """ backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'vit_tiny_r_s16_p8_224_in21k': 'vit_tiny_r_s16_p8_224.augreg_in21k', 'vit_small_r26_s32_224_in21k': 'vit_small_r26_s32_224.augreg_in21k', 'vit_base_r50_s16_224_in21k': 'vit_base_r50_s16_224.orig_in21k', 'vit_base_resnet50_224_in21k': 'vit_base_r50_s16_224.orig_in21k', 'vit_large_r50_s32_224_in21k': 'vit_large_r50_s32_224.augreg_in21k', 'vit_base_resnet50_384': 'vit_base_r50_s16_384.orig_in21k_ft_in1k' })
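# --- Usage sketch (appended commentary, not part of the original file) ----------------
# A small, hedged illustration of what HybridEmbed does: a CNN backbone produces a
# spatial feature map, which is projected to the ViT embedding dim and flattened into a
# token sequence. The resnet26d backbone and sizes below mirror the entrypoints defined
# above but are assumptions for this example only.
import torch
import timm
from timm.models.vision_transformer_hybrid import HybridEmbed

# features_only with out_indices=[4] yields the final stride-32 feature map, as used by
# the vit_*_resnet26d_224 entrypoints above.
backbone = timm.create_model('resnet26d', pretrained=False, features_only=True, out_indices=[4])
embed = HybridEmbed(backbone, img_size=224, patch_size=1, embed_dim=768)

x = torch.randn(2, 3, 224, 224)
tokens = embed(x)                      # (2, 49, 768): the 7x7 feature map flattened to 49 tokens
print(embed.grid_size, tokens.shape)   # (7, 7) torch.Size([2, 49, 768])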
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/vision_transformer_relpos.py
""" Relative Position Vision Transformer (ViT) in PyTorch NOTE: these models are experimental / WIP, expect changes Hacked together by / Copyright 2022, Ross Wightman """ import logging import math from functools import partial from typing import Optional, Tuple import torch import torch.nn as nn from torch.jit import Final from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import PatchEmbed, Mlp, DropPath, RelPosMlp, RelPosBias, use_fused_attn from ._builder import build_model_with_cfg from ._registry import generate_default_cfgs, register_model __all__ = ['VisionTransformerRelPos'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class RelPosAttention(nn.Module): fused_attn: Final[bool] def __init__( self, dim, num_heads=8, qkv_bias=False, qk_norm=False, rel_pos_cls=None, attn_drop=0., proj_drop=0., norm_layer=nn.LayerNorm, ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.rel_pos = rel_pos_cls(num_heads=num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) q = self.q_norm(q) k = self.k_norm(k) if self.fused_attn: if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos else: attn_bias = None x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p, ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if self.rel_pos is not None: attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class RelPosBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, rel_pos_cls=None, init_values=None, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = RelPosAttention( dim, num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=proj_drop, ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ResPostRelPosBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, rel_pos_cls=None, init_values=None, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.init_values = init_values self.attn = RelPosAttention( dim, num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=proj_drop, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.init_weights() def init_weights(self): # NOTE this init overrides that base model init with specific changes for the block type if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.norm1(self.attn(x, shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class VisionTransformerRelPos(nn.Module): """ Vision Transformer w/ Relative Position Bias Differing from classic vit, this impl * uses relative position index (swin v1 / beit) or relative log coord + mlp (swin v2) pos embed * defaults to no class token (can be enabled) * defaults to global avg pool for head (can be changed) * layer-scale (residual branch gain) enabled """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='avg', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_norm=False, init_values=1e-6, class_token=False, fc_norm=False, rel_pos_type='mlp', rel_pos_dim=None, shared_rel_pos=False, drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., weight_init='skip', embed_layer=PatchEmbed, norm_layer=None, act_layer=None, block_fn=RelPosBlock ): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head global_pool (str): type of global pooling for final sequence (default: 'avg') embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True qk_norm (bool): Enable normalization of query and key in attention init_values: (float): layer-scale init values class_token (bool): use class token (default: False) fc_norm (bool): use pre classifier norm instead of pre-pool rel_pos_ty pe (str): type of relative position shared_rel_pos (bool): share relative pos across all blocks drop_rate (float): dropout rate proj_drop_rate (float): projection dropout rate attn_drop_rate (float): 
attention dropout rate drop_path_rate (float): stochastic depth rate weight_init (str): weight init scheme embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer act_layer: (nn.Module): MLP activation layer """ super().__init__() assert global_pool in ('', 'avg', 'token') assert class_token or global_pool != 'token' norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_prefix_tokens = 1 if class_token else 0 self.grad_checkpointing = False self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) feat_size = self.patch_embed.grid_size rel_pos_args = dict(window_size=feat_size, prefix_tokens=self.num_prefix_tokens) if rel_pos_type.startswith('mlp'): if rel_pos_dim: rel_pos_args['hidden_dim'] = rel_pos_dim if 'swin' in rel_pos_type: rel_pos_args['mode'] = 'swin' rel_pos_cls = partial(RelPosMlp, **rel_pos_args) else: rel_pos_cls = partial(RelPosBias, **rel_pos_args) self.shared_rel_pos = None if shared_rel_pos: self.shared_rel_pos = rel_pos_cls(num_heads=num_heads) # NOTE shared rel pos currently mutually exclusive w/ per-block, but could support both... rel_pos_cls = None self.cls_token = nn.Parameter(torch.zeros(1, self.num_prefix_tokens, embed_dim)) if class_token else None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ block_fn( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, ) for i in range(depth)]) self.norm = norm_layer(embed_dim) if not fc_norm else nn.Identity() # Classifier Head self.fc_norm = norm_layer(embed_dim) if fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() if weight_init != 'skip': self.init_weights(weight_init) def init_weights(self, mode=''): assert mode in ('jax', 'moco', '') if self.cls_token is not None: nn.init.normal_(self.cls_token, std=1e-6) # FIXME weight init scheme using PyTorch defaults curently #named_apply(get_init_weights_vit(mode, head_bias), self) @torch.jit.ignore def no_weight_decay(self): return {'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes: int, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'token') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, 
shared_rel_pos=shared_rel_pos) else: x = blk(x, shared_rel_pos=shared_rel_pos) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_vision_transformer_relpos(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg(VisionTransformerRelPos, variant, pretrained, **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'vit_relpos_base_patch32_plus_rpn_256.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_replos_base_patch32_plus_rpn_256-sw-dd486f51.pth', hf_hub_id='timm/', input_size=(3, 256, 256)), 'vit_relpos_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240)), 'vit_relpos_small_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_small_patch16_224-sw-ec2778b4.pth', hf_hub_id='timm/'), 'vit_relpos_medium_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_224-sw-11c174af.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_224-sw-49049aed.pth', hf_hub_id='timm/'), 'vit_srelpos_small_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_small_patch16_224-sw-6cdb8849.pth', hf_hub_id='timm/'), 'vit_srelpos_medium_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_medium_patch16_224-sw-ad702b8c.pth', hf_hub_id='timm/'), 'vit_relpos_medium_patch16_cls_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_cls_224-sw-cfe8e259.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_cls_224.untrained': _cfg(), 'vit_relpos_base_patch16_clsgap_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_gapcls_224-sw-1a341d6c.pth', hf_hub_id='timm/'), 'vit_relpos_small_patch16_rpn_224.untrained': _cfg(), 'vit_relpos_medium_patch16_rpn_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_rpn_224-sw-5d2befd8.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_rpn_224.untrained': _cfg(), }) @register_model def vit_relpos_base_patch32_plus_rpn_256(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/32+) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, 
block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch32_plus_rpn_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_plus_240(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16+) w/ relative log-coord position, no class token """ model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos( 'vit_relpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos( 'vit_relpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_srelpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=False, rel_pos_dim=384, shared_rel_pos=True) model = _create_vision_transformer_relpos( 'vit_srelpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_srelpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, rel_pos_dim=512, shared_rel_pos=True) model = _create_vision_transformer_relpos( 'vit_srelpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-M/16) w/ relative log-coord position, class token present """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, rel_pos_dim=256, class_token=True, global_pool='token') model = _create_vision_transformer_relpos( 'vit_relpos_medium_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, class token present """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, 
qkv_bias=False, class_token=True, global_pool='token') model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_clsgap_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, class token present NOTE this config is a bit of a mistake, class token was enabled but global avg-pool w/ fc-norm was not disabled Leaving here for comparisons w/ a future re-train as it performs quite well. """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, class_token=True) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_clsgap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_small_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_small_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_medium_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model
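# --- Usage sketch (appended commentary, not part of the original file) ----------------
# A hedged example of the relative-position ViT variants registered above. Unlike the
# classic ViT, these default to no class token and global average pooling, with a
# relative position bias (RelPosMlp / RelPosBias) added inside attention instead of an
# absolute position embedding. The chosen model name and shapes are illustrative.
import torch
import timm

model = timm.create_model('vit_relpos_small_patch16_224', pretrained=False, num_classes=0)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    tokens = model.forward_features(x)                      # (1, 196, 384): 14x14 patch tokens, no cls token
    pooled = model.forward_head(tokens, pre_logits=True)    # (1, 384) after avg pool and fc_norm

print(tokens.shape, pooled.shape)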
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/vision_transformer_sam.py
""" Vision Transformer (ViT) in PyTorch A PyTorch implement of Vision Transformers as described in: 'Exploring Plain Vision Transformer Backbones for Object Detection' - https://arxiv.org/abs/2203.16527 'Segment Anything Model (SAM)' - https://github.com/facebookresearch/segment-anything/ """ import logging from functools import partial from typing import Callable, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import PatchEmbed, Mlp, DropPath, PatchDropout, LayerNorm2d, ClassifierHead, NormMlpClassifierHead,\ Format, resample_abs_pos_embed_nhwc from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model # model_registry will add each entrypoint fn to this __all__ = ['VisionTransformerSAM'] _logger = logging.getLogger(__name__) class Attention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=True, qk_norm=False, attn_drop=0., proj_drop=0., norm_layer=nn.LayerNorm, use_rel_pos: bool = False, rel_pos_zero_init: bool = True, input_size: Optional[Tuple[int, int]] = None, ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.use_rel_pos = use_rel_pos if self.use_rel_pos: assert ( input_size is not None ), "Input size must be provided if using relative positional encoding." 
# initialize relative positional embeddings self.rel_pos_h = nn.Parameter(torch.zeros( 2 * input_size[0] - 1, self.head_dim)) self.rel_pos_w = nn.Parameter(torch.zeros( 2 * input_size[1] - 1, self.head_dim)) def forward(self, x): B, H, W, _ = x.shape qkv = self.qkv(x).reshape( B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # qkv with shape (3, B, nHead, H * W, C) q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) # q, k, v with shape (B * nHead, H * W, C) q, k = self.q_norm(q), self.k_norm(k) q = q * self.scale attn = q @ k.transpose(-2, -1) if self.use_rel_pos: attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) x = self.proj(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class Block(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=True, qk_norm=False, proj_drop=0., attn_drop=0., init_values=None, drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, mlp_layer=Mlp, use_rel_pos=False, window_size=0, input_size=None, ): super().__init__() self.window_size = window_size self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, attn_drop=attn_drop, proj_drop=proj_drop, norm_layer=norm_layer, use_rel_pos=use_rel_pos, input_size=input_size if window_size == 0 else (window_size, window_size), ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = mlp_layer( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x x = self.norm1(x) # Window partition if self.window_size > 0: H, W = x.shape[1], x.shape[2] x, pad_hw = window_partition(x, self.window_size) x = self.drop_path1(self.ls1(self.attn(x))) # Reverse window partition if self.window_size > 0: x = window_unpartition(x, self.window_size, pad_hw, (H, W)) x = shortcut + x x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: """ Partition into non-overlapping windows with padding if needed. Args: x (tensor): input tokens with [B, H, W, C]. window_size (int): window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. 
(Hp, Wp): padded height and width before partition """ B, H, W, C = x.shape pad_h = (window_size - H % window_size) % window_size pad_w = (window_size - W % window_size) % window_size if pad_h > 0 or pad_w > 0: x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) Hp, Wp = H + pad_h, W + pad_w x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows, (Hp, Wp) def window_unpartition( windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int] ) -> torch.Tensor: """ Window unpartition into original sequences and removing padding. Args: windows (tensor): input tokens with [B * num_windows, window_size, window_size, C]. window_size (int): window size. pad_hw (Tuple): padded height and width (Hp, Wp). hw (Tuple): original height and width (H, W) before padding. Returns: x: unpartitioned sequences with [B, H, W, C]. """ Hp, Wp = pad_hw H, W = hw B = windows.shape[0] // (Hp * Wp // window_size // window_size) x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) if Hp > H or Wp > W: x = x[:, :H, :W, :].contiguous() return x def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of query q. k_size (int): size of key k. rel_pos (Tensor): relative position embeddings (L, C). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos if needed. if rel_pos.shape[0] != max_rel_dist: # Interpolate rel pos. rel_pos_resized = F.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: rel_pos_resized = rel_pos # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] def add_decomposed_rel_pos( attn: torch.Tensor, q: torch.Tensor, rel_pos_h: torch.Tensor, rel_pos_w: torch.Tensor, q_size: Tuple[int, int], k_size: Tuple[int, int], ) -> torch.Tensor: """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py Args: attn (Tensor): attention map. q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. q_size (Tuple): spatial sequence size of query q with (q_h, q_w). k_size (Tuple): spatial sequence size of key k with (k_h, k_w). Returns: attn (Tensor): attention map with added relative positional embeddings. 
""" q_h, q_w = q_size k_h, k_w = k_size Rh = get_rel_pos(q_h, k_h, rel_pos_h) Rw = get_rel_pos(q_w, k_w, rel_pos_w) B, _, dim = q.shape r_q = q.reshape(B, q_h, q_w, dim) rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) attn = ( attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] ).view(B, q_h * q_w, k_h * k_w) return attn class VisionTransformerSAM(nn.Module): """ Vision Transformer for Segment-Anything Model(SAM) A PyTorch impl of : `Exploring Plain Vision Transformer Backbones for Object Detection` or `Segment Anything Model (SAM)` - https://arxiv.org/abs/2010.11929 """ def __init__( self, img_size: int = 1024, patch_size: int = 16, in_chans: int = 3, num_classes: int = 768, embed_dim: int = 768, depth: int = 12, num_heads: int = 12, mlp_ratio: float = 4., qkv_bias: bool = True, qk_norm: bool = False, init_values: Optional[float] = None, pre_norm: bool = False, drop_rate: float = 0., pos_drop_rate: float = 0., patch_drop_rate: float = 0., proj_drop_rate: float = 0., attn_drop_rate: float = 0., drop_path_rate: float = 0., weight_init: str = '', embed_layer: Callable = partial( PatchEmbed, output_fmt=Format.NHWC, strict_img_size=False), norm_layer: Optional[Callable] = nn.LayerNorm, act_layer: Optional[Callable] = nn.GELU, block_fn: Callable = Block, mlp_layer: Callable = Mlp, use_abs_pos: bool = True, use_rel_pos: bool = False, window_size: int = 14, global_attn_indexes: Tuple[int, ...] = (), neck_chans: int = 256, global_pool: str = 'avg', head_hidden_size: Optional[int] = None ): """ Args: img_size: Input image size. patch_size: Patch size. in_chans: Number of image input channels. num_classes: Mumber of classes for classification head. global_pool: Type of global pooling for final sequence (default: 'token'). embed_dim: Transformer embedding dimension. depth: Depth of transformer. num_heads: Number of attention heads. mlp_ratio: Ratio of mlp hidden dim to embedding dim. qkv_bias: Enable bias for qkv projections if True. init_values: Layer-scale init values (layer-scale enabled if not None). drop_rate: Head dropout rate. pos_drop_rate: Position embedding dropout rate. attn_drop_rate: Attention dropout rate. drop_path_rate: Stochastic depth rate. weight_init: Weight initialization scheme. embed_layer: Patch embedding layer. norm_layer: Normalization layer. act_layer: MLP activation layer. block_fn: Transformer block layer. use_abs_pos: If True, use absolute positional embeddings. use_rel_pos: If True, add relative positional embeddings to the attention map. window_size: Window size for window attention blocks. If 0, not use window attention. global_attn_indexes: Indexes for blocks using global attention. Used when window_size > 0. global_pool: Global pooling type. head_hidden_size: If set, use NormMlpHead """ super().__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.global_pool = global_pool # num_features for consistency with other models self.num_features = self.embed_dim = embed_dim self.grad_checkpointing = False self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, bias=not pre_norm, # disable bias if pre-norm is used ) grid_size = self.patch_embed.grid_size if use_abs_pos: # Initialize absolute positional embedding with pretrain image size. 
self.pos_embed = nn.Parameter(torch.zeros(1, grid_size[0], grid_size[1], embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=pos_drop_rate) if patch_drop_rate > 0: self.patch_drop = PatchDropout( patch_drop_rate, num_prefix_tokens=0, ) else: self.patch_drop = nn.Identity() self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity() # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] self.blocks = nn.Sequential(*[ block_fn( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, mlp_layer=mlp_layer, use_rel_pos=use_rel_pos, window_size=window_size if i not in global_attn_indexes else 0, input_size=grid_size, ) for i in range(depth)]) if neck_chans: self.neck = nn.Sequential( nn.Conv2d( embed_dim, neck_chans, kernel_size=1, bias=False, ), LayerNorm2d(neck_chans), nn.Conv2d( neck_chans, neck_chans, kernel_size=3, padding=1, bias=False, ), LayerNorm2d(neck_chans), ) self.num_features = neck_chans else: self.neck = nn.Identity() neck_chans = embed_dim # Classifier Head if head_hidden_size: self.head = NormMlpClassifierHead( neck_chans, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=drop_rate, ) else: self.head = ClassifierHead( neck_chans, num_classes, pool_type=global_pool, drop_rate=drop_rate, ) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'dist_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^pos_embed|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes=0, global_pool=None): self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.patch_embed(x) if self.pos_embed is not None: # dynamically resize abs pos embedding if needed x = x + resample_abs_pos_embed_nhwc(self.pos_embed, x.shape[1:3]) x = self.pos_drop(x) x = self.patch_drop(x) x = self.norm_pre(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.neck(x.permute(0, 3, 1, 2)) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn( state_dict, model, ): """ Remap SAM checkpoints -> timm """ sam_checkpoint = 'image_encoder.patch_embed.proj.weight' in state_dict out_dict = {} for k, v in state_dict.items(): if k.startswith('image_encoder.'): k = k[14:] k = k.replace('mlp.lin', 'mlp.fc') else: if sam_checkpoint: continue out_dict[k] = v return out_dict def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 1024, 1024), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # Segment-Anyhing Model (SAM) pretrained - https://github.com/facebookresearch/segment-anything (no classifier head, for fine-tune/features only) 'samvit_base_patch16.sa1b': _cfg( 
url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_large_patch16.sa1b': _cfg( url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), 'samvit_huge_patch16.sa1b': _cfg( url='https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth', hf_hub_id='timm/', license='apache-2.0', mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, num_classes=0, input_size=(3, 1024, 1024), crop_pct=1.0), }) def _create_vision_transformer(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError( 'features_only not implemented for Vision Transformer models.') return build_model_with_cfg( VisionTransformerSAM, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) @register_model def samvit_base_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-B/16 for Segment-Anything """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, global_attn_indexes=[2, 5, 8, 11], window_size=14, use_rel_pos=True, img_size=1024, ) model = _create_vision_transformer( 'samvit_base_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_large_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-L/16 for Segment-Anything """ model_args = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, global_attn_indexes=[5, 11, 17, 23], window_size=14, use_rel_pos=True, img_size=1024, ) model = _create_vision_transformer( 'samvit_large_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def samvit_huge_patch16(pretrained=False, **kwargs) -> VisionTransformerSAM: """ ViT-H/16 for Segment-Anything """ model_args = dict( patch_size=16, embed_dim=1280, depth=32, num_heads=16, global_attn_indexes=[7, 15, 23, 31], window_size=14, use_rel_pos=True, img_size=1024, ) model = _create_vision_transformer( 'samvit_huge_patch16', pretrained=pretrained, **dict(model_args, **kwargs)) return model
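# Usage sketch (illustrative addition, not from the upstream file): a hedged example of
# using the SAM image encoder registered above as a feature extractor. Assumes `timm`
# and `torch` are importable; pretrained=False uses random weights, so nothing is downloaded.
if __name__ == '__main__':
    import torch
    import timm

    encoder = timm.create_model('samvit_base_patch16', pretrained=False)
    encoder.eval()
    img = torch.randn(1, 3, 1024, 1024)  # these SAM encoders are configured for 1024x1024 inputs
    with torch.no_grad():
        feats = encoder.forward_features(img)
    # With patch_size=16 and the default 256-channel neck this yields a (1, 256, 64, 64) feature map.
    print(feats.shape)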
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/volo.py
""" Vision OutLOoker (VOLO) implementation Paper: `VOLO: Vision Outlooker for Visual Recognition` - https://arxiv.org/abs/2106.13112 Code adapted from official impl at https://github.com/sail-sg/volo, original copyright in comment below Modifications and additions for timm by / Copyright 2022, Ross Wightman """ # Copyright 2021 Sea Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, to_2tuple, to_ntuple, trunc_normal_ from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs __all__ = ['VOLO'] # model_registry will add each entrypoint fn to this class OutlookAttention(nn.Module): def __init__( self, dim, num_heads, kernel_size=3, padding=1, stride=1, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() head_dim = dim // num_heads self.num_heads = num_heads self.kernel_size = kernel_size self.padding = padding self.stride = stride self.scale = head_dim ** -0.5 self.v = nn.Linear(dim, dim, bias=qkv_bias) self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride) self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True) def forward(self, x): B, H, W, C = x.shape v = self.v(x).permute(0, 3, 1, 2) # B, C, H, W h, w = math.ceil(H / self.stride), math.ceil(W / self.stride) v = self.unfold(v).reshape( B, self.num_heads, C // self.num_heads, self.kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2) # B,H,N,kxk,C/H attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1) attn = self.attn(attn).reshape( B, h * w, self.num_heads, self.kernel_size * self.kernel_size, self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4) # B,H,N,kxk,kxk attn = attn * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self.kernel_size * self.kernel_size, h * w) x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size, padding=self.padding, stride=self.stride) x = self.proj(x.permute(0, 2, 3, 1)) x = self.proj_drop(x) return x class Outlooker(nn.Module): def __init__( self, dim, kernel_size, padding, stride=1, num_heads=1, mlp_ratio=3., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, qkv_bias=False, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = OutlookAttention( dim, num_heads, kernel_size=kernel_size, padding=padding, stride=stride, qkv_bias=qkv_bias, attn_drop=attn_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, ) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class Attention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, H, W, C = x.shape qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, H, W, C) x = self.proj(x) x = self.proj_drop(x) return x class Transformer(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer) def forward(self, x): x = x + self.drop_path(self.attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ClassAttention(nn.Module): def __init__( self, dim, num_heads=8, head_dim=None, qkv_bias=False, attn_drop=0., proj_drop=0., ): super().__init__() self.num_heads = num_heads if head_dim is not None: self.head_dim = head_dim else: head_dim = dim // num_heads self.head_dim = head_dim self.scale = head_dim ** -0.5 self.kv = nn.Linear(dim, self.head_dim * self.num_heads * 2, bias=qkv_bias) self.q = nn.Linear(dim, self.head_dim * self.num_heads, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(self.head_dim * self.num_heads, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) k, v = kv.unbind(0) q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim) attn = ((q * self.scale) @ k.transpose(-2, -1)) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) cls_embed = (attn @ v).transpose(1, 2).reshape(B, 1, self.head_dim * self.num_heads) cls_embed = self.proj(cls_embed) cls_embed = self.proj_drop(cls_embed) return cls_embed class ClassBlock(nn.Module): def __init__( self, dim, num_heads, head_dim=None, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = ClassAttention( dim, num_heads=num_heads, head_dim=head_dim, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, ) # NOTE: drop path for stochastic depth self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, ) def forward(self, x): cls_embed = x[:, :1] cls_embed = cls_embed + self.drop_path(self.attn(self.norm1(x))) cls_embed = cls_embed + self.drop_path(self.mlp(self.norm2(cls_embed))) return torch.cat([cls_embed, x[:, 1:]], dim=1) def get_block(block_type, **kargs): if block_type == 'ca': return ClassBlock(**kargs) def rand_bbox(size, lam, scale=1): """ get bounding box as token labeling (https://github.com/zihangJiang/TokenLabeling) return: bounding box """ W = size[1] // scale H = size[2] // scale cut_rat = np.sqrt(1. - lam) cut_w = (W * cut_rat).astype(int) cut_h = (H * cut_rat).astype(int) # uniform cx = np.random.randint(W) cy = np.random.randint(H) bbx1 = np.clip(cx - cut_w // 2, 0, W) bby1 = np.clip(cy - cut_h // 2, 0, H) bbx2 = np.clip(cx + cut_w // 2, 0, W) bby2 = np.clip(cy + cut_h // 2, 0, H) return bbx1, bby1, bbx2, bby2 class PatchEmbed(nn.Module): """ Image to Patch Embedding. Different with ViT use 1 conv layer, we use 4 conv layers to do patch embedding """ def __init__( self, img_size=224, stem_conv=False, stem_stride=1, patch_size=8, in_chans=3, hidden_dim=64, embed_dim=384, ): super().__init__() assert patch_size in [4, 8, 16] if stem_conv: self.conv = nn.Sequential( nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride, padding=3, bias=False), # 112x112 nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112 nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112 nn.BatchNorm2d(hidden_dim), nn.ReLU(inplace=True), ) else: self.conv = None self.proj = nn.Conv2d( hidden_dim, embed_dim, kernel_size=patch_size // stem_stride, stride=patch_size // stem_stride) self.num_patches = (img_size // patch_size) * (img_size // patch_size) def forward(self, x): if self.conv is not None: x = self.conv(x) x = self.proj(x) # B, C, H, W return x class Downsample(nn.Module): """ Image to Patch Embedding, downsampling between stage1 and stage2 """ def __init__(self, in_embed_dim, out_embed_dim, patch_size=2): super().__init__() self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.proj(x) # B, C, H, W x = x.permute(0, 2, 3, 1) return x def outlooker_blocks( block_fn, index, dim, layers, num_heads=1, kernel_size=3, padding=1, stride=2, mlp_ratio=3., qkv_bias=False, attn_drop=0, drop_path_rate=0., **kwargs, ): """ generate outlooker layer in stage1 return: outlooker layers """ blocks = [] for block_idx in range(layers[index]): block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) blocks.append(block_fn( dim, kernel_size=kernel_size, padding=padding, stride=stride, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, attn_drop=attn_drop, drop_path=block_dpr, )) blocks = nn.Sequential(*blocks) return blocks def transformer_blocks( block_fn, index, dim, layers, num_heads, mlp_ratio=3., qkv_bias=False, attn_drop=0, drop_path_rate=0., **kwargs, ): """ generate transformer layers in stage2 return: transformer layers """ blocks = [] for block_idx in range(layers[index]): block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1) blocks.append(block_fn( dim, num_heads, 
mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, attn_drop=attn_drop, drop_path=block_dpr, )) blocks = nn.Sequential(*blocks) return blocks class VOLO(nn.Module): """ Vision Outlooker, the main class of our model """ def __init__( self, layers, img_size=224, in_chans=3, num_classes=1000, global_pool='token', patch_size=8, stem_hidden_dim=64, embed_dims=None, num_heads=None, downsamples=(True, False, False, False), outlook_attention=(True, False, False, False), mlp_ratio=3.0, qkv_bias=False, drop_rate=0., pos_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, post_layers=('ca', 'ca'), use_aux_head=True, use_mix_token=False, pooling_scale=2, ): super().__init__() num_layers = len(layers) mlp_ratio = to_ntuple(num_layers)(mlp_ratio) img_size = to_2tuple(img_size) self.num_classes = num_classes self.global_pool = global_pool self.mix_token = use_mix_token self.pooling_scale = pooling_scale self.num_features = embed_dims[-1] if use_mix_token: # enable token mixing, see token labeling for details. self.beta = 1.0 assert global_pool == 'token', "return all tokens if mix_token is enabled" self.grad_checkpointing = False self.patch_embed = PatchEmbed( stem_conv=True, stem_stride=2, patch_size=patch_size, in_chans=in_chans, hidden_dim=stem_hidden_dim, embed_dim=embed_dims[0], ) # inital positional encoding, we add positional encoding after outlooker blocks patch_grid = (img_size[0] // patch_size // pooling_scale, img_size[1] // patch_size // pooling_scale) self.pos_embed = nn.Parameter(torch.zeros(1, patch_grid[0], patch_grid[1], embed_dims[-1])) self.pos_drop = nn.Dropout(p=pos_drop_rate) # set the main block in network network = [] for i in range(len(layers)): if outlook_attention[i]: # stage 1 stage = outlooker_blocks( Outlooker, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, attn_drop=attn_drop_rate, norm_layer=norm_layer, ) network.append(stage) else: # stage 2 stage = transformer_blocks( Transformer, i, embed_dims[i], layers, num_heads[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, drop_path_rate=drop_path_rate, attn_drop=attn_drop_rate, norm_layer=norm_layer, ) network.append(stage) if downsamples[i]: # downsampling between two stages network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2)) self.network = nn.ModuleList(network) # set post block, for example, class attention layers self.post_network = None if post_layers is not None: self.post_network = nn.ModuleList([ get_block( post_layers[i], dim=embed_dims[-1], num_heads=num_heads[-1], mlp_ratio=mlp_ratio[-1], qkv_bias=qkv_bias, attn_drop=attn_drop_rate, drop_path=0., norm_layer=norm_layer) for i in range(len(post_layers)) ]) self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[-1])) trunc_normal_(self.cls_token, std=.02) # set output type if use_aux_head: self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() else: self.aux_head = None self.norm = norm_layer(self.num_features) # Classifier head self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() trunc_normal_(self.pos_embed, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( 
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=[ (r'^network\.(\d+)\.(\d+)', None), (r'^network\.(\d+)', (0,)), ], blocks2=[ (r'^cls_token', (0,)), (r'^post_network\.(\d+)', None), (r'^norm', (99999,)) ], ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() if self.aux_head is not None: self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_tokens(self, x): for idx, block in enumerate(self.network): if idx == 2: # add positional encoding after outlooker blocks x = x + self.pos_embed x = self.pos_drop(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(block, x) else: x = block(x) B, H, W, C = x.shape x = x.reshape(B, -1, C) return x def forward_cls(self, x): B, N, C = x.shape cls_tokens = self.cls_token.expand(B, -1, -1) x = torch.cat([cls_tokens, x], dim=1) for block in self.post_network: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(block, x) else: x = block(x) return x def forward_train(self, x): """ A separate forward fn for training with mix_token (if a train script supports). Combining multiple modes in as single forward with different return types is torchscript hell. """ x = self.patch_embed(x) x = x.permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C # mix token, see token labeling for details. if self.mix_token and self.training: lam = np.random.beta(self.beta, self.beta) patch_h, patch_w = x.shape[1] // self.pooling_scale, x.shape[2] // self.pooling_scale bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam, scale=self.pooling_scale) temp_x = x.clone() sbbx1, sbby1 = self.pooling_scale * bbx1, self.pooling_scale * bby1 sbbx2, sbby2 = self.pooling_scale * bbx2, self.pooling_scale * bby2 temp_x[:, sbbx1:sbbx2, sbby1:sbby2, :] = x.flip(0)[:, sbbx1:sbbx2, sbby1:sbby2, :] x = temp_x else: bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0 # step2: tokens learning in the two stages x = self.forward_tokens(x) # step3: post network, apply class attention or not if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) if self.global_pool == 'avg': x_cls = x.mean(dim=1) elif self.global_pool == 'token': x_cls = x[:, 0] else: x_cls = x if self.aux_head is None: return x_cls x_aux = self.aux_head(x[:, 1:]) # generate classes in all feature tokens, see token labeling if not self.training: return x_cls + 0.5 * x_aux.max(1)[0] if self.mix_token and self.training: # reverse "mix token", see token labeling for details. x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1]) temp_x = x_aux.clone() temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :] x_aux = temp_x x_aux = x_aux.reshape(x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1]) # return these: 1. class token, 2. classes from all feature tokens, 3. 
bounding box return x_cls, x_aux, (bbx1, bby1, bbx2, bby2) def forward_features(self, x): x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C # step2: tokens learning in the two stages x = self.forward_tokens(x) # step3: post network, apply class attention or not if self.post_network is not None: x = self.forward_cls(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': out = x.mean(dim=1) elif self.global_pool == 'token': out = x[:, 0] else: out = x x = self.head_drop(x) if pre_logits: return out out = self.head(out) if self.aux_head is not None: # generate classes in all feature tokens, see token labeling aux = self.aux_head(x[:, 1:]) out = out + 0.5 * aux.max(1)[0] return out def forward(self, x): """ simplified forward (without mix token training) """ x = self.forward_features(x) x = self.forward_head(x) return x def _create_volo(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') return build_model_with_cfg( VOLO, variant, pretrained, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .96, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.conv.0', 'classifier': ('head', 'aux_head'), **kwargs } default_cfgs = generate_default_cfgs({ 'volo_d1_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_224_84.2.pth.tar', crop_pct=0.96), 'volo_d1_384.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_384_85.2.pth.tar', crop_pct=1.0, input_size=(3, 384, 384)), 'volo_d2_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_224_85.2.pth.tar', crop_pct=0.96), 'volo_d2_384.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_384_86.0.pth.tar', crop_pct=1.0, input_size=(3, 384, 384)), 'volo_d3_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_224_85.4.pth.tar', crop_pct=0.96), 'volo_d3_448.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_448_86.3.pth.tar', crop_pct=1.0, input_size=(3, 448, 448)), 'volo_d4_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_224_85.7.pth.tar', crop_pct=0.96), 'volo_d4_448.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_448_86.79.pth.tar', crop_pct=1.15, input_size=(3, 448, 448)), 'volo_d5_224.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_224_86.10.pth.tar', crop_pct=0.96), 'volo_d5_448.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_448_87.0.pth.tar', crop_pct=1.15, input_size=(3, 448, 448)), 'volo_d5_512.sail_in1k': _cfg( hf_hub_id='timm/', url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_512_87.07.pth.tar', crop_pct=1.15, input_size=(3, 512, 512)), }) @register_model def volo_d1_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D1 model, Params: 27M """ model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) model = 
_create_volo('volo_d1_224', pretrained=pretrained, **model_args) return model @register_model def volo_d1_384(pretrained=False, **kwargs) -> VOLO: """ VOLO-D1 model, Params: 27M """ model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs) model = _create_volo('volo_d1_384', pretrained=pretrained, **model_args) return model @register_model def volo_d2_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D2 model, Params: 59M """ model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d2_224', pretrained=pretrained, **model_args) return model @register_model def volo_d2_384(pretrained=False, **kwargs) -> VOLO: """ VOLO-D2 model, Params: 59M """ model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d2_384', pretrained=pretrained, **model_args) return model @register_model def volo_d3_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D3 model, Params: 86M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d3_224', pretrained=pretrained, **model_args) return model @register_model def volo_d3_448(pretrained=False, **kwargs) -> VOLO: """ VOLO-D3 model, Params: 86M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs) model = _create_volo('volo_d3_448', pretrained=pretrained, **model_args) return model @register_model def volo_d4_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D4 model, Params: 193M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) model = _create_volo('volo_d4_224', pretrained=pretrained, **model_args) return model @register_model def volo_d4_448(pretrained=False, **kwargs) -> VOLO: """ VOLO-D4 model, Params: 193M """ model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs) model = _create_volo('volo_d4_448', pretrained=pretrained, **model_args) return model @register_model def volo_d5_224(pretrained=False, **kwargs) -> VOLO: """ VOLO-D5 model, Params: 296M stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 """ model_args = dict( layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_224', pretrained=pretrained, **model_args) return model @register_model def volo_d5_448(pretrained=False, **kwargs) -> VOLO: """ VOLO-D5 model, Params: 296M stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 """ model_args = dict( layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_448', pretrained=pretrained, **model_args) return model @register_model def volo_d5_512(pretrained=False, **kwargs) -> VOLO: """ VOLO-D5 model, Params: 296M stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5 """ model_args = dict( layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), mlp_ratio=4, stem_hidden_dim=128, **kwargs) model = _create_volo('volo_d5_512', pretrained=pretrained, **model_args) return model
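# Usage sketch (illustrative addition, not from the upstream file): a hedged example of
# creating a VOLO model registered above and re-targeting its classifier; reset_classifier
# replaces both the main head and the auxiliary head. Assumes `timm` and `torch` are
# importable; pretrained=False avoids any weight download.
if __name__ == '__main__':
    import torch
    import timm

    model = timm.create_model('volo_d1_224', pretrained=False)
    model.reset_classifier(num_classes=10)  # swap head and aux_head for a 10-class task
    model.eval()
    x = torch.randn(2, 3, 224, 224)  # VOLO configs here use fixed input sizes (224/384/448/512)
    with torch.no_grad():
        logits = model(x)  # the aux-head contribution is folded into the logits in forward_head
    print(logits.shape)  # torch.Size([2, 10])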
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/vovnet.py
""" VoVNet (V1 & V2) Papers: * `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730 * `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 Looked at https://github.com/youngwanLEE/vovnet-detectron2 & https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py for some reference, rewrote most of the code. Hacked together by / Copyright 2020 Ross Wightman """ from typing import List import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import ConvNormAct, SeparableConvNormAct, BatchNormAct2d, ClassifierHead, DropPath, \ create_attn, create_norm_act_layer from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['VovNet'] # model_registry will add each entrypoint fn to this class SequentialAppendList(nn.Sequential): def __init__(self, *args): super(SequentialAppendList, self).__init__(*args) def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor: for i, module in enumerate(self): if i == 0: concat_list.append(module(x)) else: concat_list.append(module(concat_list[-1])) x = torch.cat(concat_list, dim=1) return x class OsaBlock(nn.Module): def __init__( self, in_chs, mid_chs, out_chs, layer_per_block, residual=False, depthwise=False, attn='', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path=None, ): super(OsaBlock, self).__init__() self.residual = residual self.depthwise = depthwise conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) next_in_chs = in_chs if self.depthwise and next_in_chs != mid_chs: assert not residual self.conv_reduction = ConvNormAct(next_in_chs, mid_chs, 1, **conv_kwargs) else: self.conv_reduction = None mid_convs = [] for i in range(layer_per_block): if self.depthwise: conv = SeparableConvNormAct(mid_chs, mid_chs, **conv_kwargs) else: conv = ConvNormAct(next_in_chs, mid_chs, 3, **conv_kwargs) next_in_chs = mid_chs mid_convs.append(conv) self.conv_mid = SequentialAppendList(*mid_convs) # feature aggregation next_in_chs = in_chs + layer_per_block * mid_chs self.conv_concat = ConvNormAct(next_in_chs, out_chs, **conv_kwargs) self.attn = create_attn(attn, out_chs) if attn else None self.drop_path = drop_path def forward(self, x): output = [x] if self.conv_reduction is not None: x = self.conv_reduction(x) x = self.conv_mid(x, output) x = self.conv_concat(x) if self.attn is not None: x = self.attn(x) if self.drop_path is not None: x = self.drop_path(x) if self.residual: x = x + output[0] return x class OsaStage(nn.Module): def __init__( self, in_chs, mid_chs, out_chs, block_per_stage, layer_per_block, downsample=True, residual=True, depthwise=False, attn='ese', norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_path_rates=None, ): super(OsaStage, self).__init__() self.grad_checkpointing = False if downsample: self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True) else: self.pool = None blocks = [] for i in range(block_per_stage): last_block = i == block_per_stage - 1 if drop_path_rates is not None and drop_path_rates[i] > 0.: drop_path = DropPath(drop_path_rates[i]) else: drop_path = None blocks += [OsaBlock( in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise, attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path) ] in_chs = out_chs self.blocks = nn.Sequential(*blocks) def 
forward(self, x): if self.pool is not None: x = self.pool(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class VovNet(nn.Module): def __init__( self, cfg, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, norm_layer=BatchNormAct2d, act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0., **kwargs, ): """ Args: cfg (dict): Model architecture configuration in_chans (int): Number of input channels (default: 3) num_classes (int): Number of classifier classes (default: 1000) global_pool (str): Global pooling type (default: 'avg') output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32) norm_layer (Union[str, nn.Module]): normalization layer act_layer (Union[str, nn.Module]): activation layer drop_rate (float): Dropout rate (default: 0.) drop_path_rate (float): Stochastic depth drop-path rate (default: 0.) kwargs (dict): Extra kwargs overlayed onto cfg """ super(VovNet, self).__init__() self.num_classes = num_classes self.drop_rate = drop_rate assert output_stride == 32 # FIXME support dilation cfg = dict(cfg, **kwargs) stem_stride = cfg.get("stem_stride", 4) stem_chs = cfg["stem_chs"] stage_conv_chs = cfg["stage_conv_chs"] stage_out_chs = cfg["stage_out_chs"] block_per_stage = cfg["block_per_stage"] layer_per_block = cfg["layer_per_block"] conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer) # Stem module last_stem_stride = stem_stride // 2 conv_type = SeparableConvNormAct if cfg["depthwise"] else ConvNormAct self.stem = nn.Sequential(*[ ConvNormAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs), conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs), conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs), ]) self.feature_info = [dict( num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')] current_stride = stem_stride # OSA stages stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage) in_ch_list = stem_chs[-1:] + stage_out_chs[:-1] stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs) stages = [] for i in range(4): # num_stages downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4 stages += [OsaStage( in_ch_list[i], stage_conv_chs[i], stage_out_chs[i], block_per_stage[i], layer_per_block, downsample=downsample, drop_path_rates=stage_dpr[i], **stage_args, )] self.num_features = stage_out_chs[i] current_stride *= 2 if downsample else 1 self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) for n, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.Linear): nn.init.zeros_(m.bias) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)', ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) def 
forward_features(self, x): x = self.stem(x) return self.stages(x) def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x # model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 & # https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py model_cfgs = dict( vovnet39a=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=False, depthwise=False, attn='', ), vovnet57a=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 4, 3], residual=False, depthwise=False, attn='', ), ese_vovnet19b_slim_dw=dict( stem_chs=[64, 64, 64], stage_conv_chs=[64, 80, 96, 112], stage_out_chs=[112, 256, 384, 512], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=True, attn='ese', ), ese_vovnet19b_dw=dict( stem_chs=[64, 64, 64], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=True, attn='ese', ), ese_vovnet19b_slim=dict( stem_chs=[64, 64, 128], stage_conv_chs=[64, 80, 96, 112], stage_out_chs=[112, 256, 384, 512], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=False, attn='ese', ), ese_vovnet19b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=3, block_per_stage=[1, 1, 1, 1], residual=True, depthwise=False, attn='ese', ), ese_vovnet39b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=True, depthwise=False, attn='ese', ), ese_vovnet57b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 4, 3], residual=True, depthwise=False, attn='ese', ), ese_vovnet99b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 3, 9, 3], residual=True, depthwise=False, attn='ese', ), eca_vovnet39b=dict( stem_chs=[64, 64, 128], stage_conv_chs=[128, 160, 192, 224], stage_out_chs=[256, 512, 768, 1024], layer_per_block=5, block_per_stage=[1, 1, 2, 2], residual=True, depthwise=False, attn='eca', ), ) model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b'] def _create_vovnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( VovNet, variant, pretrained, model_cfg=model_cfgs[variant], feature_cfg=dict(flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs, } default_cfgs = generate_default_cfgs({ 'vovnet39a.untrained': _cfg(url=''), 'vovnet57a.untrained': _cfg(url=''), 'ese_vovnet19b_slim_dw.untrained': _cfg(url=''), 'ese_vovnet19b_dw.ra_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'ese_vovnet19b_slim.untrained': _cfg(url=''), 'ese_vovnet39b.ra_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 
'ese_vovnet57b.untrained': _cfg(url=''), 'ese_vovnet99b.untrained': _cfg(url=''), 'eca_vovnet39b.untrained': _cfg(url=''), 'ese_vovnet39b_evos.untrained': _cfg(url=''), }) @register_model def vovnet39a(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs) @register_model def vovnet57a(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_slim_dw(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_dw(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs) @register_model def ese_vovnet19b_slim(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs) @register_model def ese_vovnet39b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs) @register_model def ese_vovnet57b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs) @register_model def ese_vovnet99b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs) @register_model def eca_vovnet39b(pretrained=False, **kwargs) -> VovNet: return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs) # Experimental Models @register_model def ese_vovnet39b_evos(pretrained=False, **kwargs) -> VovNet: def norm_act_fn(num_features, **nkwargs): return create_norm_act_layer('evonorms0', num_features, jit=False, **nkwargs) return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs)
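# Usage sketch (illustrative addition, not from the upstream file): a hedged example showing
# a VoVNet both as a classifier and as a multi-scale feature backbone; this file registers
# feature_info, so features_only=True should work through the timm factory. Assumes `timm`
# and `torch` are importable; weights are random.
if __name__ == '__main__':
    import torch
    import timm

    x = torch.randn(1, 3, 224, 224)

    clf = timm.create_model('ese_vovnet39b', pretrained=False).eval()
    with torch.no_grad():
        print(clf(x).shape)  # torch.Size([1, 1000])

    backbone = timm.create_model('ese_vovnet39b', pretrained=False, features_only=True).eval()
    with torch.no_grad():
        fmaps = backbone(x)
    # Per-stage channels taken from feature_info above: stem then four OSA stages.
    print([f.shape[1] for f in fmaps])  # expected [64, 256, 512, 768, 1024]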
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/xception.py
""" Ported to pytorch thanks to [tstandley](https://github.com/tstandley/Xception-PyTorch) @author: tstandley Adapted by cadene Creates an Xception Model as defined in: Francois Chollet Xception: Deep Learning with Depthwise Separable Convolutions https://arxiv.org/pdf/1610.02357.pdf This weights ported from the Keras implementation. Achieves the following performance on the validation set: Loss:0.9173 Prec@1:78.892 Prec@5:94.292 REMEMBER to set your image size to 3x299x299 for both test and validation normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 """ import torch.jit import torch.nn as nn import torch.nn.functional as F from timm.layers import create_classifier from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['Xception'] class SeparableConv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1): super(SeparableConv2d, self).__init__() self.conv1 = nn.Conv2d( in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels, bias=False) self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=False) def forward(self, x): x = self.conv1(x) x = self.pointwise(x) return x class Block(nn.Module): def __init__(self, in_channels, out_channels, reps, strides=1, start_with_relu=True, grow_first=True): super(Block, self).__init__() if out_channels != in_channels or strides != 1: self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) self.skipbn = nn.BatchNorm2d(out_channels) else: self.skip = None rep = [] for i in range(reps): if grow_first: inc = in_channels if i == 0 else out_channels outc = out_channels else: inc = in_channels outc = in_channels if i < (reps - 1) else out_channels rep.append(nn.ReLU(inplace=True)) rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) rep.append(nn.BatchNorm2d(outc)) if not start_with_relu: rep = rep[1:] else: rep[0] = nn.ReLU(inplace=False) if strides != 1: rep.append(nn.MaxPool2d(3, strides, 1)) self.rep = nn.Sequential(*rep) def forward(self, inp): x = self.rep(inp) if self.skip is not None: skip = self.skip(inp) skip = self.skipbn(skip) else: skip = inp x += skip return x class Xception(nn.Module): """ Xception optimized for the ImageNet dataset, as specified in https://arxiv.org/pdf/1610.02357.pdf """ def __init__(self, num_classes=1000, in_chans=3, drop_rate=0., global_pool='avg'): """ Constructor Args: num_classes: number of classes """ super(Xception, self).__init__() self.drop_rate = drop_rate self.global_pool = global_pool self.num_classes = num_classes self.num_features = 2048 self.conv1 = nn.Conv2d(in_chans, 32, 3, 2, 0, bias=False) self.bn1 = nn.BatchNorm2d(32) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(32, 64, 3, bias=False) self.bn2 = nn.BatchNorm2d(64) self.act2 = nn.ReLU(inplace=True) self.block1 = Block(64, 128, 2, 2, start_with_relu=False) self.block2 = Block(128, 256, 2, 2) self.block3 = Block(256, 728, 2, 2) self.block4 = Block(728, 728, 3, 1) self.block5 = Block(728, 728, 3, 1) self.block6 = Block(728, 728, 3, 1) self.block7 = Block(728, 728, 3, 1) self.block8 = Block(728, 728, 3, 1) self.block9 = Block(728, 728, 3, 1) self.block10 = Block(728, 728, 3, 1) self.block11 = Block(728, 728, 3, 1) self.block12 = Block(728, 1024, 2, 2, grow_first=False) self.conv3 = SeparableConv2d(1024, 
1536, 3, 1, 1) self.bn3 = nn.BatchNorm2d(1536) self.act3 = nn.ReLU(inplace=True) self.conv4 = SeparableConv2d(1536, self.num_features, 3, 1, 1) self.bn4 = nn.BatchNorm2d(self.num_features) self.act4 = nn.ReLU(inplace=True) self.feature_info = [ dict(num_chs=64, reduction=2, module='act2'), dict(num_chs=128, reduction=4, module='block2.rep.0'), dict(num_chs=256, reduction=8, module='block3.rep.0'), dict(num_chs=728, reduction=16, module='block12.rep.0'), dict(num_chs=2048, reduction=32, module='act4'), ] self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) # ------- init weights -------- for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^conv[12]|bn[12]', blocks=[ (r'^block(\d+)', None), (r'^conv[34]|bn[34]', (99,)), ], ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "gradient checkpointing not supported" @torch.jit.ignore def get_classifier(self): return self.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) x = self.block1(x) x = self.block2(x) x = self.block3(x) x = self.block4(x) x = self.block5(x) x = self.block6(x) x = self.block7(x) x = self.block8(x) x = self.block9(x) x = self.block10(x) x = self.block11(x) x = self.block12(x) x = self.conv3(x) x = self.bn3(x) x = self.act3(x) x = self.conv4(x) x = self.bn4(x) x = self.act4(x) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) if self.drop_rate: x = F.dropout(x, self.drop_rate, training=self.training) return x if pre_logits else self.fc(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _xception(variant, pretrained=False, **kwargs): return build_model_with_cfg( Xception, variant, pretrained, feature_cfg=dict(feature_cls='hook'), **kwargs) default_cfgs = generate_default_cfgs({ 'legacy_xception.tf_in1k': { 'url': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth', 'input_size': (3, 299, 299), 'pool_size': (10, 10), 'crop_pct': 0.8975, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'num_classes': 1000, 'first_conv': 'conv1', 'classifier': 'fc' # The resize parameter of the validation transform should be 333, and make sure to center crop at 299x299 } }) @register_model def legacy_xception(pretrained=False, **kwargs) -> Xception: return _xception('legacy_xception', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, { 'xception': 'legacy_xception', })
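# Illustrative usage sketch: a minimal smoke test of the registered variant, assuming
# `timm` is installed, at the 3x299x299 input size the default config above declares.
if __name__ == '__main__':
    import torch
    import timm
    model = timm.create_model('legacy_xception', pretrained=False, num_classes=10).eval()
    with torch.no_grad():
        out = model(torch.randn(2, 3, 299, 299))
    print(out.shape)  # expected: torch.Size([2, 10])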
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/xception_aligned.py
"""Pytorch impl of Aligned Xception 41, 65, 71 This is a correct, from scratch impl of Aligned Xception (Deeplab) models compatible with TF weights at https://github.com/tensorflow/models/blob/master/research/deeplab/g3doc/model_zoo.md Hacked together by / Copyright 2020 Ross Wightman """ from functools import partial import torch import torch.nn as nn from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import ClassifierHead, ConvNormAct, create_conv2d, get_norm_act_layer from timm.layers.helpers import to_3tuple from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['XceptionAligned'] class SeparableConv2d(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=3, stride=1, dilation=1, padding='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, ): super(SeparableConv2d, self).__init__() self.kernel_size = kernel_size self.dilation = dilation # depthwise convolution self.conv_dw = create_conv2d( in_chs, in_chs, kernel_size, stride=stride, padding=padding, dilation=dilation, depthwise=True) self.bn_dw = norm_layer(in_chs) self.act_dw = act_layer(inplace=True) if act_layer is not None else nn.Identity() # pointwise convolution self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1) self.bn_pw = norm_layer(out_chs) self.act_pw = act_layer(inplace=True) if act_layer is not None else nn.Identity() def forward(self, x): x = self.conv_dw(x) x = self.bn_dw(x) x = self.act_dw(x) x = self.conv_pw(x) x = self.bn_pw(x) x = self.act_pw(x) return x class PreSeparableConv2d(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=3, stride=1, dilation=1, padding='', act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, first_act=True, ): super(PreSeparableConv2d, self).__init__() norm_act_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) self.kernel_size = kernel_size self.dilation = dilation self.norm = norm_act_layer(in_chs, inplace=True) if first_act else nn.Identity() # depthwise convolution self.conv_dw = create_conv2d( in_chs, in_chs, kernel_size, stride=stride, padding=padding, dilation=dilation, depthwise=True) # pointwise convolution self.conv_pw = create_conv2d(in_chs, out_chs, kernel_size=1) def forward(self, x): x = self.norm(x) x = self.conv_dw(x) x = self.conv_pw(x) return x class XceptionModule(nn.Module): def __init__( self, in_chs, out_chs, stride=1, dilation=1, pad_type='', start_with_relu=True, no_skip=False, act_layer=nn.ReLU, norm_layer=None, ): super(XceptionModule, self).__init__() out_chs = to_3tuple(out_chs) self.in_channels = in_chs self.out_channels = out_chs[-1] self.no_skip = no_skip if not no_skip and (self.out_channels != self.in_channels or stride != 1): self.shortcut = ConvNormAct( in_chs, self.out_channels, 1, stride=stride, norm_layer=norm_layer, apply_act=False) else: self.shortcut = None separable_act_layer = None if start_with_relu else act_layer self.stack = nn.Sequential() for i in range(3): if start_with_relu: self.stack.add_module(f'act{i + 1}', act_layer(inplace=i > 0)) self.stack.add_module(f'conv{i + 1}', SeparableConv2d( in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, act_layer=separable_act_layer, norm_layer=norm_layer)) in_chs = out_chs[i] def forward(self, x): skip = x x = self.stack(x) if self.shortcut is not None: skip = self.shortcut(skip) if not self.no_skip: x = x + skip return x class PreXceptionModule(nn.Module): def __init__( self, in_chs, out_chs, stride=1, 
dilation=1, pad_type='', no_skip=False, act_layer=nn.ReLU, norm_layer=None, ): super(PreXceptionModule, self).__init__() out_chs = to_3tuple(out_chs) self.in_channels = in_chs self.out_channels = out_chs[-1] self.no_skip = no_skip if not no_skip and (self.out_channels != self.in_channels or stride != 1): self.shortcut = create_conv2d(in_chs, self.out_channels, 1, stride=stride) else: self.shortcut = nn.Identity() self.norm = get_norm_act_layer(norm_layer, act_layer=act_layer)(in_chs, inplace=True) self.stack = nn.Sequential() for i in range(3): self.stack.add_module(f'conv{i + 1}', PreSeparableConv2d( in_chs, out_chs[i], 3, stride=stride if i == 2 else 1, dilation=dilation, padding=pad_type, act_layer=act_layer, norm_layer=norm_layer, first_act=i > 0, )) in_chs = out_chs[i] def forward(self, x): x = self.norm(x) skip = x x = self.stack(x) if not self.no_skip: x = x + self.shortcut(skip) return x class XceptionAligned(nn.Module): """Modified Aligned Xception """ def __init__( self, block_cfg, num_classes=1000, in_chans=3, output_stride=32, preact=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_rate=0., global_pool='avg', ): super(XceptionAligned, self).__init__() assert output_stride in (8, 16, 32) self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False layer_args = dict(act_layer=act_layer, norm_layer=norm_layer) self.stem = nn.Sequential(*[ ConvNormAct(in_chans, 32, kernel_size=3, stride=2, **layer_args), create_conv2d(32, 64, kernel_size=3, stride=1) if preact else ConvNormAct(32, 64, kernel_size=3, stride=1, **layer_args) ]) curr_dilation = 1 curr_stride = 2 self.feature_info = [] self.blocks = nn.Sequential() module_fn = PreXceptionModule if preact else XceptionModule for i, b in enumerate(block_cfg): b['dilation'] = curr_dilation if b['stride'] > 1: name = f'blocks.{i}.stack.conv2' if preact else f'blocks.{i}.stack.act3' self.feature_info += [dict(num_chs=to_3tuple(b['out_chs'])[-2], reduction=curr_stride, module=name)] next_stride = curr_stride * b['stride'] if next_stride > output_stride: curr_dilation *= b['stride'] b['stride'] = 1 else: curr_stride = next_stride self.blocks.add_module(str(i), module_fn(**b, **layer_args)) self.num_features = self.blocks[-1].out_channels self.feature_info += [dict( num_chs=self.num_features, reduction=curr_stride, module='blocks.' 
+ str(len(self.blocks) - 1))] self.act = act_layer(inplace=True) if preact else nn.Identity() self.head = ClassifierHead( in_features=self.num_features, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate, ) @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=r'^blocks\.(\d+)', ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) x = self.act(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _xception(variant, pretrained=False, **kwargs): return build_model_with_cfg( XceptionAligned, variant, pretrained, feature_cfg=dict(flatten_sequential=True, feature_cls='hook'), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (10, 10), 'crop_pct': 0.903, 'interpolation': 'bicubic', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'xception65.ra3_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.94, ), 'xception41.tf_in1k': _cfg(hf_hub_id='timm/'), 'xception65.tf_in1k': _cfg(hf_hub_id='timm/'), 'xception71.tf_in1k': _cfg(hf_hub_id='timm/'), 'xception41p.ra3_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.94, ), 'xception65p.ra3_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.94, ), }) @register_model def xception41(pretrained=False, **kwargs) -> XceptionAligned: """ Modified Aligned Xception-41 """ block_cfg = [ # entry flow dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=2), # middle flow *([dict(in_chs=728, out_chs=728, stride=1)] * 8), # exit flow dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), ] model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) return _xception('xception41', pretrained=pretrained, **model_args) @register_model def xception65(pretrained=False, **kwargs) -> XceptionAligned: """ Modified Aligned Xception-65 """ block_cfg = [ # entry flow dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=2), # middle flow *([dict(in_chs=728, out_chs=728, stride=1)] * 16), # exit flow dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), ] model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) return _xception('xception65', pretrained=pretrained, **model_args) @register_model def xception71(pretrained=False, **kwargs) -> XceptionAligned: """ Modified Aligned Xception-71 """ block_cfg = [ # entry flow dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=1), dict(in_chs=256, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=1), dict(in_chs=728, out_chs=728, stride=2), # middle 
flow *([dict(in_chs=728, out_chs=728, stride=1)] * 16), # exit flow dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True, start_with_relu=False), ] model_args = dict(block_cfg=block_cfg, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) return _xception('xception71', pretrained=pretrained, **model_args) @register_model def xception41p(pretrained=False, **kwargs) -> XceptionAligned: """ Modified Aligned Xception-41 w/ Pre-Act """ block_cfg = [ # entry flow dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=2), # middle flow *([dict(in_chs=728, out_chs=728, stride=1)] * 8), # exit flow dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, out_chs=(1536, 1536, 2048), no_skip=True, stride=1), ] model_args = dict(block_cfg=block_cfg, preact=True, norm_layer=nn.BatchNorm2d, **kwargs) return _xception('xception41p', pretrained=pretrained, **model_args) @register_model def xception65p(pretrained=False, **kwargs) -> XceptionAligned: """ Modified Aligned Xception-65 w/ Pre-Act """ block_cfg = [ # entry flow dict(in_chs=64, out_chs=128, stride=2), dict(in_chs=128, out_chs=256, stride=2), dict(in_chs=256, out_chs=728, stride=2), # middle flow *([dict(in_chs=728, out_chs=728, stride=1)] * 16), # exit flow dict(in_chs=728, out_chs=(728, 1024, 1024), stride=2), dict(in_chs=1024, out_chs=(1536, 1536, 2048), stride=1, no_skip=True), ] model_args = dict( block_cfg=block_cfg, preact=True, norm_layer=partial(nn.BatchNorm2d, eps=.001, momentum=.1), **kwargs) return _xception('xception65p', pretrained=pretrained, **model_args)
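# Illustrative usage sketch, assuming `timm` is installed: the aligned variants accept
# the DeepLab-style `output_stride` argument (8, 16, or 32), which the constructor
# converts into dilation for the deeper blocks, and `features_only=True` relies on the
# hook-based feature extraction configured in `_xception` above.
if __name__ == '__main__':
    import timm
    backbone = timm.create_model('xception65', pretrained=False, output_stride=16, features_only=True)
    feats = backbone(torch.randn(1, 3, 299, 299))
    print([f.shape for f in feats])  # five feature maps, the deepest held at stride 16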
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/xcit.py
""" Cross-Covariance Image Transformer (XCiT) in PyTorch Paper: - https://arxiv.org/abs/2106.09681 Same as the official implementation, with some minor adaptations, original copyright below - https://github.com/facebookresearch/xcit/blob/master/xcit.py Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman """ # Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. import math from functools import partial import torch import torch.nn as nn from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, trunc_normal_, to_2tuple from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs, register_model_deprecations from .cait import ClassAttn from .vision_transformer import Mlp __all__ = ['Xcit'] # model_registry will add each entrypoint fn to this @register_notrace_module # reason: FX can't symbolically trace torch.arange in forward method class PositionalEncodingFourier(nn.Module): """ Positional encoding relying on a fourier kernel matching the one used in the "Attention is all you Need" paper. Based on the official XCiT code - https://github.com/facebookresearch/xcit/blob/master/xcit.py """ def __init__(self, hidden_dim=32, dim=768, temperature=10000): super().__init__() self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1) self.scale = 2 * math.pi self.temperature = temperature self.hidden_dim = hidden_dim self.dim = dim self.eps = 1e-6 def forward(self, B: int, H: int, W: int): device = self.token_projection.weight.device y_embed = torch.arange(1, H+1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W) x_embed = torch.arange(1, W+1, dtype=torch.float32, device=device).repeat(1, H, 1) y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device) dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3) pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) pos = self.token_projection(pos) return pos.repeat(B, 1, 1, 1) # (B, C, H, W) def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution + batch norm""" return torch.nn.Sequential( nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False), nn.BatchNorm2d(out_planes) ) class ConvPatchEmbed(nn.Module): """Image to Patch Embedding using multiple convolutional layers""" def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU): super().__init__() img_size = to_2tuple(img_size) num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches if patch_size == 16: self.proj = torch.nn.Sequential( conv3x3(in_chans, embed_dim // 8, 2), act_layer(), conv3x3(embed_dim // 8, embed_dim // 4, 2), act_layer(), conv3x3(embed_dim // 4, embed_dim // 2, 2), act_layer(), conv3x3(embed_dim // 2, embed_dim, 2), ) elif patch_size == 8: self.proj = torch.nn.Sequential( conv3x3(in_chans, embed_dim // 4, 2), 
act_layer(), conv3x3(embed_dim // 4, embed_dim // 2, 2), act_layer(), conv3x3(embed_dim // 2, embed_dim, 2), ) else: raise ValueError('For convolutional projection, patch size has to be in [8, 16]') def forward(self, x): x = self.proj(x) Hp, Wp = x.shape[2], x.shape[3] x = x.flatten(2).transpose(1, 2) # (B, N, C) return x, (Hp, Wp) class LPI(nn.Module): """ Local Patch Interaction module that allows explicit communication between tokens in 3x3 windows to augment the implicit communication performed by the block diagonal scatter attention. Implemented using 2 layers of separable 3x3 convolutions with GeLU and BatchNorm2d """ def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3): super().__init__() out_features = out_features or in_features padding = kernel_size // 2 self.conv1 = torch.nn.Conv2d( in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features) self.act = act_layer() self.bn = nn.BatchNorm2d(in_features) self.conv2 = torch.nn.Conv2d( in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features) def forward(self, x, H: int, W: int): B, N, C = x.shape x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.conv1(x) x = self.act(x) x = self.bn(x) x = self.conv2(x) x = x.reshape(B, C, N).permute(0, 2, 1) return x class ClassAttentionBlock(nn.Module): """Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239""" def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = ClassAttn( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) if eta is not None: # LayerScale Initialization (no layerscale when None) self.gamma1 = nn.Parameter(eta * torch.ones(dim)) self.gamma2 = nn.Parameter(eta * torch.ones(dim)) else: self.gamma1, self.gamma2 = 1.0, 1.0 # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 self.tokens_norm = tokens_norm def forward(self, x): x_norm1 = self.norm1(x) x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) x = x + self.drop_path(self.gamma1 * x_attn) if self.tokens_norm: x = self.norm2(x) else: x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) x_res = x cls_token = x[:, 0:1] cls_token = self.gamma2 * self.mlp(cls_token) x = torch.cat([cls_token, x[:, 1:]], dim=1) x = x_res + self.drop_path(x) return x class XCA(nn.Module): """ Cross-Covariance Attention (XCA) Operation where the channels are updated using a weighted sum.
The weights are obtained from the (softmax normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h) """ def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape # Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N) qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) # Paper section 3.2 l2-Normalization and temperature scaling q = torch.nn.functional.normalize(q, dim=-1) k = torch.nn.functional.normalize(k, dim=-1) attn = (q @ k.transpose(-2, -1)) * self.temperature attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) # (B, H, C', N), permute -> (B, N, H, C') x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x @torch.jit.ignore def no_weight_decay(self): return {'temperature'} class XCABlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., ): super().__init__() self.norm1 = norm_layer(dim) self.attn = XCA(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm3 = norm_layer(dim) self.local_mp = LPI(in_features=dim, act_layer=act_layer) self.norm2 = norm_layer(dim) self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop) self.gamma1 = nn.Parameter(eta * torch.ones(dim)) self.gamma3 = nn.Parameter(eta * torch.ones(dim)) self.gamma2 = nn.Parameter(eta * torch.ones(dim)) def forward(self, x, H: int, W: int): x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) # NOTE official code has 3 then 2, so keeping it the same to be consistent with loaded weights # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 x = x + self.drop_path(self.gamma3 * self.local_mp(self.norm3(x), H, W)) x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) return x class Xcit(nn.Module): """ Based on timm and DeiT code bases https://github.com/rwightman/pytorch-image-models/tree/master/timm https://github.com/facebookresearch/deit/ """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='token', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., pos_drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., act_layer=None, norm_layer=None, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False, ): """ Args: img_size (int, tuple): input image size patch_size (int): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True drop_rate (float): dropout rate after positional embedding, and in XCA/CA projection + MLP pos_drop_rate: position embedding dropout rate proj_drop_rate (float): projection dropout rate 
attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate (constant across all layers) norm_layer: (nn.Module): normalization layer cls_attn_layers: (int) Depth of Class attention layers use_pos_embed: (bool) whether to use positional encoding eta: (float) layerscale initialization value tokens_norm: (bool) Whether to normalize all tokens or just the cls_token in the CA Notes: - Although `norm_layer` is user specifiable, there are hard-coded `BatchNorm2d`s in the local patch interaction (class LPI) and the patch embedding (class ConvPatchEmbed) """ super().__init__() assert global_pool in ('', 'avg', 'token') img_size = to_2tuple(img_size) assert (img_size[0] % patch_size == 0) and (img_size[1] % patch_size == 0), \ '`patch_size` should divide image dimensions evenly' norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim self.global_pool = global_pool self.grad_checkpointing = False self.patch_embed = ConvPatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer, ) self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_pos_embed: self.pos_embed = PositionalEncodingFourier(dim=embed_dim) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=pos_drop_rate) self.blocks = nn.ModuleList([ XCABlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, ) for _ in range(depth)]) self.cls_attn_blocks = nn.ModuleList([ ClassAttentionBlock( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=drop_rate, attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm, ) for _ in range(cls_attn_layers)]) # Classifier head self.norm = norm_layer(embed_dim) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() # Init weights trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|pos_embed|patch_embed', # stem and embed blocks=r'^blocks\.(\d+)', cls_attn_blocks=[(r'^cls_attn_blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'token') self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): B = x.shape[0] # x is (B, N, C).
(Hp, Wp) is (height in units of patches, width in units of patches) x, (Hp, Wp) = self.patch_embed(x) if self.pos_embed is not None: # `pos_embed` (B, C, Hp, Wp), reshape -> (B, C, N), permute -> (B, N, C) pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1) x = x + pos_encoding x = self.pos_drop(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, Hp, Wp) else: x = blk(x, Hp, Wp) x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) for blk in self.cls_attn_blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x) else: x = blk(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'model' in state_dict: state_dict = state_dict['model'] # For consistency with timm's transformer models while being compatible with official weights source we rename # pos_embeder to pos_embed. Also account for use_pos_embed == False use_pos_embed = getattr(model, 'pos_embed', None) is not None pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')] for k in pos_embed_keys: if use_pos_embed: state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k) else: del state_dict[k] # timm's implementation of class attention in CaiT is slightly more efficient as it does not compute query vectors # for all tokens, just the class token. To use official weights source we must split qkv into q, k, v if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict(): num_ca_blocks = len(model.cls_attn_blocks) for i in range(num_ca_blocks): qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight') qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1]) for j, subscript in enumerate('qkv'): state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j] qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None) if qkv_bias is not None: qkv_bias = qkv_bias.reshape(3, -1) for j, subscript in enumerate('qkv'): state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j] return state_dict def _create_xcit(variant, pretrained=False, default_cfg=None, **kwargs): model = build_model_with_cfg( Xcit, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # Patch size 16 'xcit_nano_12_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth'), 'xcit_nano_12_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth'), 'xcit_nano_12_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_12_p16_224.fb_in1k': _cfg( hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth'), 'xcit_tiny_12_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth'), 'xcit_tiny_12_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth'), 'xcit_tiny_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth'), 'xcit_tiny_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_12_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth'), 'xcit_small_12_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth'), 'xcit_small_12_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth'), 'xcit_small_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth'), 'xcit_small_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_medium_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth'), 'xcit_medium_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth'), 'xcit_medium_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth', input_size=(3, 384, 384)), 'xcit_large_24_p16_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth'), 'xcit_large_24_p16_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth'), 'xcit_large_24_p16_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth', input_size=(3, 384, 384)), # Patch size 8 'xcit_nano_12_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth'), 'xcit_nano_12_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth'), 'xcit_nano_12_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_12_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth'), 'xcit_tiny_12_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth'), 'xcit_tiny_12_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_tiny_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth'), 'xcit_tiny_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', 
url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth'), 'xcit_tiny_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_12_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth'), 'xcit_small_12_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth'), 'xcit_small_12_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_small_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth'), 'xcit_small_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth'), 'xcit_small_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_medium_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth'), 'xcit_medium_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth'), 'xcit_medium_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth', input_size=(3, 384, 384)), 'xcit_large_24_p8_224.fb_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth'), 'xcit_large_24_p8_224.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth'), 'xcit_large_24_p8_384.fb_dist_in1k': _cfg( hf_hub_id='timm/', url='https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth', input_size=(3, 384, 384)), }) @register_model def xcit_nano_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_nano_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False, img_size=384) model = _create_xcit('xcit_nano_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=8, 
eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p16_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p16_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=16, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p16_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model # Patch size 8x8 models @register_model def xcit_nano_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_nano_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=128, depth=12, num_heads=4, eta=1.0, tokens_norm=False) model = _create_xcit('xcit_nano_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_tiny_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=12, num_heads=4, eta=1.0, tokens_norm=True) model = 
_create_xcit('xcit_tiny_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_12_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=12, num_heads=8, eta=1.0, tokens_norm=True) model = _create_xcit('xcit_small_12_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_tiny_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=192, depth=24, num_heads=4, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_tiny_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_small_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=384, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_small_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_medium_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=512, depth=24, num_heads=8, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_medium_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p8_224(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p8_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def xcit_large_24_p8_384(pretrained=False, **kwargs) -> Xcit: model_args = dict( patch_size=8, embed_dim=768, depth=24, num_heads=16, eta=1e-5, tokens_norm=True) model = _create_xcit('xcit_large_24_p8_384', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { # Patch size 16 'xcit_nano_12_p16_224_dist': 'xcit_nano_12_p16_224.fb_dist_in1k', 'xcit_nano_12_p16_384_dist': 'xcit_nano_12_p16_384.fb_dist_in1k', 'xcit_tiny_12_p16_224_dist': 'xcit_tiny_12_p16_224.fb_dist_in1k', 'xcit_tiny_12_p16_384_dist': 'xcit_tiny_12_p16_384.fb_dist_in1k', 'xcit_tiny_24_p16_224_dist': 'xcit_tiny_24_p16_224.fb_dist_in1k', 'xcit_tiny_24_p16_384_dist': 'xcit_tiny_24_p16_384.fb_dist_in1k', 'xcit_small_12_p16_224_dist': 'xcit_small_12_p16_224.fb_dist_in1k', 'xcit_small_12_p16_384_dist': 
'xcit_small_12_p16_384.fb_dist_in1k', 'xcit_small_24_p16_224_dist': 'xcit_small_24_p16_224.fb_dist_in1k', 'xcit_medium_24_p16_224_dist': 'xcit_medium_24_p16_224.fb_dist_in1k', 'xcit_medium_24_p16_384_dist': 'xcit_medium_24_p16_384.fb_dist_in1k', 'xcit_large_24_p16_224_dist': 'xcit_large_24_p16_224.fb_dist_in1k', 'xcit_large_24_p16_384_dist': 'xcit_large_24_p16_384.fb_dist_in1k', # Patch size 8 'xcit_nano_12_p8_224_dist': 'xcit_nano_12_p8_224.fb_dist_in1k', 'xcit_nano_12_p8_384_dist': 'xcit_nano_12_p8_384.fb_dist_in1k', 'xcit_tiny_12_p8_224_dist': 'xcit_tiny_12_p8_224.fb_dist_in1k', 'xcit_tiny_12_p8_384_dist': 'xcit_tiny_12_p8_384.fb_dist_in1k', 'xcit_tiny_24_p8_224_dist': 'xcit_tiny_24_p8_224.fb_dist_in1k', 'xcit_tiny_24_p8_384_dist': 'xcit_tiny_24_p8_384.fb_dist_in1k', 'xcit_small_12_p8_224_dist': 'xcit_small_12_p8_224.fb_dist_in1k', 'xcit_small_12_p8_384_dist': 'xcit_small_12_p8_384.fb_dist_in1k', 'xcit_small_24_p8_224_dist': 'xcit_small_24_p8_224.fb_dist_in1k', 'xcit_small_24_p8_384_dist': 'xcit_small_24_p8_384.fb_dist_in1k', 'xcit_medium_24_p8_224_dist': 'xcit_medium_24_p8_224.fb_dist_in1k', 'xcit_medium_24_p8_384_dist': 'xcit_medium_24_p8_384.fb_dist_in1k', 'xcit_large_24_p8_224_dist': 'xcit_large_24_p8_224.fb_dist_in1k', 'xcit_large_24_p8_384_dist': 'xcit_large_24_p8_384.fb_dist_in1k', })
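# Illustrative usage sketch, assuming `timm` is installed. XCA attends over channels
# rather than tokens, so attention cost grows linearly with token count; that is what
# keeps the p8 variants tractable despite having ~4x the tokens of their p16 counterparts.
if __name__ == '__main__':
    import timm
    model = timm.create_model('xcit_nano_12_p16_224', pretrained=False)
    tokens = model.forward_features(torch.randn(1, 3, 224, 224))
    print(tokens.shape)  # torch.Size([1, 197, 128]): cls token + (224 // 16) ** 2 patch tokens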
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/ecaresnet101d_pruned.txt
conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[45, 64, 1, 1]***layer1.0.bn1.weight:[45]***layer1.0.conv2.weight:[25, 45, 3, 3]***layer1.0.bn2.weight:[25]***layer1.0.conv3.weight:[26, 25, 1, 1]***layer1.0.bn3.weight:[26]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[26, 64, 1, 1]***layer1.0.downsample.2.weight:[26]***layer1.1.conv1.weight:[53, 26, 1, 1]***layer1.1.bn1.weight:[53]***layer1.1.conv2.weight:[20, 53, 3, 3]***layer1.1.bn2.weight:[20]***layer1.1.conv3.weight:[26, 20, 1, 1]***layer1.1.bn3.weight:[26]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[60, 26, 1, 1]***layer1.2.bn1.weight:[60]***layer1.2.conv2.weight:[27, 60, 3, 3]***layer1.2.bn2.weight:[27]***layer1.2.conv3.weight:[26, 27, 1, 1]***layer1.2.bn3.weight:[26]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[81, 26, 1, 1]***layer2.0.bn1.weight:[81]***layer2.0.conv2.weight:[24, 81, 3, 3]***layer2.0.bn2.weight:[24]***layer2.0.conv3.weight:[142, 24, 1, 1]***layer2.0.bn3.weight:[142]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[142, 26, 1, 1]***layer2.0.downsample.2.weight:[142]***layer2.1.conv1.weight:[93, 142, 1, 1]***layer2.1.bn1.weight:[93]***layer2.1.conv2.weight:[49, 93, 3, 3]***layer2.1.bn2.weight:[49]***layer2.1.conv3.weight:[142, 49, 1, 1]***layer2.1.bn3.weight:[142]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[102, 142, 1, 1]***layer2.2.bn1.weight:[102]***layer2.2.conv2.weight:[54, 102, 3, 3]***layer2.2.bn2.weight:[54]***layer2.2.conv3.weight:[142, 54, 1, 1]***layer2.2.bn3.weight:[142]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[122, 142, 1, 1]***layer2.3.bn1.weight:[122]***layer2.3.conv2.weight:[78, 122, 3, 3]***layer2.3.bn2.weight:[78]***layer2.3.conv3.weight:[142, 78, 1, 1]***layer2.3.bn3.weight:[142]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[101, 142, 1, 1]***layer3.0.bn1.weight:[101]***layer3.0.conv2.weight:[25, 101, 3, 3]***layer3.0.bn2.weight:[25]***layer3.0.conv3.weight:[278, 25, 1, 1]***layer3.0.bn3.weight:[278]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[278, 142, 1, 1]***layer3.0.downsample.2.weight:[278]***layer3.1.conv1.weight:[239, 278, 1, 1]***layer3.1.bn1.weight:[239]***layer3.1.conv2.weight:[160, 239, 3, 3]***layer3.1.bn2.weight:[160]***layer3.1.conv3.weight:[278, 160, 1, 1]***layer3.1.bn3.weight:[278]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[234, 278, 1, 1]***layer3.2.bn1.weight:[234]***layer3.2.conv2.weight:[156, 234, 3, 3]***layer3.2.bn2.weight:[156]***layer3.2.conv3.weight:[278, 156, 1, 1]***layer3.2.bn3.weight:[278]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[250, 278, 1, 1]***layer3.3.bn1.weight:[250]***layer3.3.conv2.weight:[176, 250, 3, 3]***layer3.3.bn2.weight:[176]***layer3.3.conv3.weight:[278, 176, 1, 1]***layer3.3.bn3.weight:[278]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[253, 278, 1, 1]***layer3.4.bn1.weight:[253]***layer3.4.conv2.weight:[191, 253, 3, 3]***layer3.4.bn2.weight:[191]***layer3.4.conv3.weight:[278, 191, 1, 1]***layer3.4.bn3.weight:[278]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[251, 278, 1, 1]***layer3.5.bn1.weight:[251]***layer3.5.conv2.weight:[175, 251, 3, 3]***layer3.5.bn2.weight:[175]***layer3.5.conv3.weight:[278, 175, 1, 1]***layer3.5.bn3.weight:[278]***layer3.5.se.conv.weight:[1, 1, 5]***layer3.6.conv1.weight:[230, 278, 1, 
1]***layer3.6.bn1.weight:[230]***layer3.6.conv2.weight:[128, 230, 3, 3]***layer3.6.bn2.weight:[128]***layer3.6.conv3.weight:[278, 128, 1, 1]***layer3.6.bn3.weight:[278]***layer3.6.se.conv.weight:[1, 1, 5]***layer3.7.conv1.weight:[244, 278, 1, 1]***layer3.7.bn1.weight:[244]***layer3.7.conv2.weight:[154, 244, 3, 3]***layer3.7.bn2.weight:[154]***layer3.7.conv3.weight:[278, 154, 1, 1]***layer3.7.bn3.weight:[278]***layer3.7.se.conv.weight:[1, 1, 5]***layer3.8.conv1.weight:[244, 278, 1, 1]***layer3.8.bn1.weight:[244]***layer3.8.conv2.weight:[159, 244, 3, 3]***layer3.8.bn2.weight:[159]***layer3.8.conv3.weight:[278, 159, 1, 1]***layer3.8.bn3.weight:[278]***layer3.8.se.conv.weight:[1, 1, 5]***layer3.9.conv1.weight:[238, 278, 1, 1]***layer3.9.bn1.weight:[238]***layer3.9.conv2.weight:[97, 238, 3, 3]***layer3.9.bn2.weight:[97]***layer3.9.conv3.weight:[278, 97, 1, 1]***layer3.9.bn3.weight:[278]***layer3.9.se.conv.weight:[1, 1, 5]***layer3.10.conv1.weight:[244, 278, 1, 1]***layer3.10.bn1.weight:[244]***layer3.10.conv2.weight:[149, 244, 3, 3]***layer3.10.bn2.weight:[149]***layer3.10.conv3.weight:[278, 149, 1, 1]***layer3.10.bn3.weight:[278]***layer3.10.se.conv.weight:[1, 1, 5]***layer3.11.conv1.weight:[253, 278, 1, 1]***layer3.11.bn1.weight:[253]***layer3.11.conv2.weight:[181, 253, 3, 3]***layer3.11.bn2.weight:[181]***layer3.11.conv3.weight:[278, 181, 1, 1]***layer3.11.bn3.weight:[278]***layer3.11.se.conv.weight:[1, 1, 5]***layer3.12.conv1.weight:[245, 278, 1, 1]***layer3.12.bn1.weight:[245]***layer3.12.conv2.weight:[119, 245, 3, 3]***layer3.12.bn2.weight:[119]***layer3.12.conv3.weight:[278, 119, 1, 1]***layer3.12.bn3.weight:[278]***layer3.12.se.conv.weight:[1, 1, 5]***layer3.13.conv1.weight:[255, 278, 1, 1]***layer3.13.bn1.weight:[255]***layer3.13.conv2.weight:[216, 255, 3, 3]***layer3.13.bn2.weight:[216]***layer3.13.conv3.weight:[278, 216, 1, 1]***layer3.13.bn3.weight:[278]***layer3.13.se.conv.weight:[1, 1, 5]***layer3.14.conv1.weight:[256, 278, 1, 1]***layer3.14.bn1.weight:[256]***layer3.14.conv2.weight:[201, 256, 3, 3]***layer3.14.bn2.weight:[201]***layer3.14.conv3.weight:[278, 201, 1, 1]***layer3.14.bn3.weight:[278]***layer3.14.se.conv.weight:[1, 1, 5]***layer3.15.conv1.weight:[253, 278, 1, 1]***layer3.15.bn1.weight:[253]***layer3.15.conv2.weight:[149, 253, 3, 3]***layer3.15.bn2.weight:[149]***layer3.15.conv3.weight:[278, 149, 1, 1]***layer3.15.bn3.weight:[278]***layer3.15.se.conv.weight:[1, 1, 5]***layer3.16.conv1.weight:[254, 278, 1, 1]***layer3.16.bn1.weight:[254]***layer3.16.conv2.weight:[141, 254, 3, 3]***layer3.16.bn2.weight:[141]***layer3.16.conv3.weight:[278, 141, 1, 1]***layer3.16.bn3.weight:[278]***layer3.16.se.conv.weight:[1, 1, 5]***layer3.17.conv1.weight:[256, 278, 1, 1]***layer3.17.bn1.weight:[256]***layer3.17.conv2.weight:[190, 256, 3, 3]***layer3.17.bn2.weight:[190]***layer3.17.conv3.weight:[278, 190, 1, 1]***layer3.17.bn3.weight:[278]***layer3.17.se.conv.weight:[1, 1, 5]***layer3.18.conv1.weight:[256, 278, 1, 1]***layer3.18.bn1.weight:[256]***layer3.18.conv2.weight:[217, 256, 3, 3]***layer3.18.bn2.weight:[217]***layer3.18.conv3.weight:[278, 217, 1, 1]***layer3.18.bn3.weight:[278]***layer3.18.se.conv.weight:[1, 1, 5]***layer3.19.conv1.weight:[255, 278, 1, 1]***layer3.19.bn1.weight:[255]***layer3.19.conv2.weight:[156, 255, 3, 3]***layer3.19.bn2.weight:[156]***layer3.19.conv3.weight:[278, 156, 1, 1]***layer3.19.bn3.weight:[278]***layer3.19.se.conv.weight:[1, 1, 5]***layer3.20.conv1.weight:[256, 278, 1, 1]***layer3.20.bn1.weight:[256]***layer3.20.conv2.weight:[155, 256, 3, 
3]***layer3.20.bn2.weight:[155]***layer3.20.conv3.weight:[278, 155, 1, 1]***layer3.20.bn3.weight:[278]***layer3.20.se.conv.weight:[1, 1, 5]***layer3.21.conv1.weight:[256, 278, 1, 1]***layer3.21.bn1.weight:[256]***layer3.21.conv2.weight:[232, 256, 3, 3]***layer3.21.bn2.weight:[232]***layer3.21.conv3.weight:[278, 232, 1, 1]***layer3.21.bn3.weight:[278]***layer3.21.se.conv.weight:[1, 1, 5]***layer3.22.conv1.weight:[256, 278, 1, 1]***layer3.22.bn1.weight:[256]***layer3.22.conv2.weight:[214, 256, 3, 3]***layer3.22.bn2.weight:[214]***layer3.22.conv3.weight:[278, 214, 1, 1]***layer3.22.bn3.weight:[278]***layer3.22.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[499, 278, 1, 1]***layer4.0.bn1.weight:[499]***layer4.0.conv2.weight:[289, 499, 3, 3]***layer4.0.bn2.weight:[289]***layer4.0.conv3.weight:[2042, 289, 1, 1]***layer4.0.bn3.weight:[2042]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2042, 278, 1, 1]***layer4.0.downsample.2.weight:[2042]***layer4.1.conv1.weight:[512, 2042, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[512, 512, 3, 3]***layer4.1.bn2.weight:[512]***layer4.1.conv3.weight:[2042, 512, 1, 1]***layer4.1.bn3.weight:[2042]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2042, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[502, 512, 3, 3]***layer4.2.bn2.weight:[502]***layer4.2.conv3.weight:[2042, 502, 1, 1]***layer4.2.bn3.weight:[2042]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2042]***layer1_2_conv3_M.weight:[256, 26]***layer2_3_conv3_M.weight:[512, 142]***layer3_22_conv3_M.weight:[1024, 278]***layer4_2_conv3_M.weight:[2048, 2042]
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/ecaresnet50d_pruned.txt
conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[492, 
818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022]
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/efficientnet_b1_pruned.txt
conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[48, 16, 1, 1]***blocks.1.0.bn1.weight:[48]***blocks.1.0.bn1.bias:[48]***blocks.1.0.bn1.running_mean:[48]***blocks.1.0.bn1.running_var:[48]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[48, 1, 3, 3]***blocks.1.0.bn2.weight:[48]***blocks.1.0.bn2.bias:[48]***blocks.1.0.bn2.running_mean:[48]***blocks.1.0.bn2.running_var:[48]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 48, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[48, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[48]***blocks.1.0.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[62, 12, 1, 1]***blocks.1.1.bn1.weight:[62]***blocks.1.1.bn1.bias:[62]***blocks.1.1.bn1.running_mean:[62]***blocks.1.1.bn1.running_var:[62]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[62, 1, 3, 3]***blocks.1.1.bn2.weight:[62]***blocks.1.1.bn2.bias:[62]***blocks.1.1.bn2.running_mean:[62]***blocks.1.1.bn2.running_var:[62]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 62, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[62, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[62]***blocks.1.1.conv_pwl.weight:[12, 62, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[48, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 
1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[70, 12, 1, 1]***blocks.2.0.bn1.weight:[70]***blocks.2.0.bn1.bias:[70]***blocks.2.0.bn1.running_mean:[70]***blocks.2.0.bn1.running_var:[70]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[70, 1, 5, 5]***blocks.2.0.bn2.weight:[70]***blocks.2.0.bn2.bias:[70]***blocks.2.0.bn2.running_mean:[70]***blocks.2.0.bn2.running_var:[70]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 70, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[70, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[70]***blocks.2.0.conv_pwl.weight:[35, 70, 1, 1]***blocks.2.0.bn3.weight:[35]***blocks.2.0.bn3.bias:[35]***blocks.2.0.bn3.running_mean:[35]***blocks.2.0.bn3.running_var:[35]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[61, 35, 1, 1]***blocks.2.1.bn1.weight:[61]***blocks.2.1.bn1.bias:[61]***blocks.2.1.bn1.running_mean:[61]***blocks.2.1.bn1.running_var:[61]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[61, 1, 5, 5]***blocks.2.1.bn2.weight:[61]***blocks.2.1.bn2.bias:[61]***blocks.2.1.bn2.running_mean:[61]***blocks.2.1.bn2.running_var:[61]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[10, 61, 1, 1]***blocks.2.1.se.conv_reduce.bias:[10]***blocks.2.1.se.conv_expand.weight:[61, 10, 1, 1]***blocks.2.1.se.conv_expand.bias:[61]***blocks.2.1.conv_pwl.weight:[35, 61, 1, 1]***blocks.2.1.bn3.weight:[35]***blocks.2.1.bn3.bias:[35]***blocks.2.1.bn3.running_mean:[35]***blocks.2.1.bn3.running_var:[35]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[51, 35, 1, 1]***blocks.2.2.bn1.weight:[51]***blocks.2.2.bn1.bias:[51]***blocks.2.2.bn1.running_mean:[51]***blocks.2.2.bn1.running_var:[51]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[51, 1, 5, 5]***blocks.2.2.bn2.weight:[51]***blocks.2.2.bn2.bias:[51]***blocks.2.2.bn2.running_mean:[51]***blocks.2.2.bn2.running_var:[51]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[10, 51, 1, 1]***blocks.2.2.se.conv_reduce.bias:[10]***blocks.2.2.se.conv_expand.weight:[51, 10, 1, 1]***blocks.2.2.se.conv_expand.bias:[51]***blocks.2.2.conv_pwl.weight:[35, 51, 1, 1]***blocks.2.2.bn3.weight:[35]***blocks.2.2.bn3.bias:[35]***blocks.2.2.bn3.running_mean:[35]***blocks.2.2.bn3.running_var:[35]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[175, 35, 1, 1]***blocks.3.0.bn1.weight:[175]***blocks.3.0.bn1.bias:[175]***blocks.3.0.bn1.running_mean:[175]***blocks.3.0.bn1.running_var:[175]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[175, 1, 3, 3]***blocks.3.0.bn2.weight:[175]***blocks.3.0.bn2.bias:[175]***blocks.3.0.bn2.running_mean:[175]***blocks.3.0.bn2.running_var:[175]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[10, 175, 1, 1]***blocks.3.0.se.conv_reduce.bias:[10]***blocks.3.0.se.conv_expand.weight:[175, 10, 1, 1]***blocks.3.0.se.conv_expand.bias:[175]***blocks.3.0.conv_pwl.weight:[74, 175, 1, 1]***blocks.3.0.bn3.weight:[74]***blocks.3.0.bn3.bias:[74]***blocks.3.0.bn3.running_mean:[74]***blocks.3.0.bn3.running_var:[74]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[188, 74, 1, 
1]***blocks.3.1.bn1.weight:[188]***blocks.3.1.bn1.bias:[188]***blocks.3.1.bn1.running_mean:[188]***blocks.3.1.bn1.running_var:[188]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[188, 1, 3, 3]***blocks.3.1.bn2.weight:[188]***blocks.3.1.bn2.bias:[188]***blocks.3.1.bn2.running_mean:[188]***blocks.3.1.bn2.running_var:[188]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[20, 188, 1, 1]***blocks.3.1.se.conv_reduce.bias:[20]***blocks.3.1.se.conv_expand.weight:[188, 20, 1, 1]***blocks.3.1.se.conv_expand.bias:[188]***blocks.3.1.conv_pwl.weight:[74, 188, 1, 1]***blocks.3.1.bn3.weight:[74]***blocks.3.1.bn3.bias:[74]***blocks.3.1.bn3.running_mean:[74]***blocks.3.1.bn3.running_var:[74]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[137, 74, 1, 1]***blocks.3.2.bn1.weight:[137]***blocks.3.2.bn1.bias:[137]***blocks.3.2.bn1.running_mean:[137]***blocks.3.2.bn1.running_var:[137]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[137, 1, 3, 3]***blocks.3.2.bn2.weight:[137]***blocks.3.2.bn2.bias:[137]***blocks.3.2.bn2.running_mean:[137]***blocks.3.2.bn2.running_var:[137]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[20, 137, 1, 1]***blocks.3.2.se.conv_reduce.bias:[20]***blocks.3.2.se.conv_expand.weight:[137, 20, 1, 1]***blocks.3.2.se.conv_expand.bias:[137]***blocks.3.2.conv_pwl.weight:[74, 137, 1, 1]***blocks.3.2.bn3.weight:[74]***blocks.3.2.bn3.bias:[74]***blocks.3.2.bn3.running_mean:[74]***blocks.3.2.bn3.running_var:[74]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[164, 74, 1, 1]***blocks.3.3.bn1.weight:[164]***blocks.3.3.bn1.bias:[164]***blocks.3.3.bn1.running_mean:[164]***blocks.3.3.bn1.running_var:[164]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[164, 1, 3, 3]***blocks.3.3.bn2.weight:[164]***blocks.3.3.bn2.bias:[164]***blocks.3.3.bn2.running_mean:[164]***blocks.3.3.bn2.running_var:[164]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[20, 164, 1, 1]***blocks.3.3.se.conv_reduce.bias:[20]***blocks.3.3.se.conv_expand.weight:[164, 20, 1, 1]***blocks.3.3.se.conv_expand.bias:[164]***blocks.3.3.conv_pwl.weight:[74, 164, 1, 1]***blocks.3.3.bn3.weight:[74]***blocks.3.3.bn3.bias:[74]***blocks.3.3.bn3.running_mean:[74]***blocks.3.3.bn3.running_var:[74]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[399, 74, 1, 1]***blocks.4.0.bn1.weight:[399]***blocks.4.0.bn1.bias:[399]***blocks.4.0.bn1.running_mean:[399]***blocks.4.0.bn1.running_var:[399]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[399, 1, 5, 5]***blocks.4.0.bn2.weight:[399]***blocks.4.0.bn2.bias:[399]***blocks.4.0.bn2.running_mean:[399]***blocks.4.0.bn2.running_var:[399]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[20, 399, 1, 1]***blocks.4.0.se.conv_reduce.bias:[20]***blocks.4.0.se.conv_expand.weight:[399, 20, 1, 1]***blocks.4.0.se.conv_expand.bias:[399]***blocks.4.0.conv_pwl.weight:[67, 399, 1, 1]***blocks.4.0.bn3.weight:[67]***blocks.4.0.bn3.bias:[67]***blocks.4.0.bn3.running_mean:[67]***blocks.4.0.bn3.running_var:[67]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[201, 67, 1, 1]***blocks.4.1.bn1.weight:[201]***blocks.4.1.bn1.bias:[201]***blocks.4.1.bn1.running_mean:[201]***blocks.4.1.bn1.running_var:[201]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[201, 1, 5, 
5]***blocks.4.1.bn2.weight:[201]***blocks.4.1.bn2.bias:[201]***blocks.4.1.bn2.running_mean:[201]***blocks.4.1.bn2.running_var:[201]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[28, 201, 1, 1]***blocks.4.1.se.conv_reduce.bias:[28]***blocks.4.1.se.conv_expand.weight:[201, 28, 1, 1]***blocks.4.1.se.conv_expand.bias:[201]***blocks.4.1.conv_pwl.weight:[67, 201, 1, 1]***blocks.4.1.bn3.weight:[67]***blocks.4.1.bn3.bias:[67]***blocks.4.1.bn3.running_mean:[67]***blocks.4.1.bn3.running_var:[67]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[160, 67, 1, 1]***blocks.4.2.bn1.weight:[160]***blocks.4.2.bn1.bias:[160]***blocks.4.2.bn1.running_mean:[160]***blocks.4.2.bn1.running_var:[160]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[160, 1, 5, 5]***blocks.4.2.bn2.weight:[160]***blocks.4.2.bn2.bias:[160]***blocks.4.2.bn2.running_mean:[160]***blocks.4.2.bn2.running_var:[160]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[28, 160, 1, 1]***blocks.4.2.se.conv_reduce.bias:[28]***blocks.4.2.se.conv_expand.weight:[160, 28, 1, 1]***blocks.4.2.se.conv_expand.bias:[160]***blocks.4.2.conv_pwl.weight:[67, 160, 1, 1]***blocks.4.2.bn3.weight:[67]***blocks.4.2.bn3.bias:[67]***blocks.4.2.bn3.running_mean:[67]***blocks.4.2.bn3.running_var:[67]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[213, 67, 1, 1]***blocks.4.3.bn1.weight:[213]***blocks.4.3.bn1.bias:[213]***blocks.4.3.bn1.running_mean:[213]***blocks.4.3.bn1.running_var:[213]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[213, 1, 5, 5]***blocks.4.3.bn2.weight:[213]***blocks.4.3.bn2.bias:[213]***blocks.4.3.bn2.running_mean:[213]***blocks.4.3.bn2.running_var:[213]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[28, 213, 1, 1]***blocks.4.3.se.conv_reduce.bias:[28]***blocks.4.3.se.conv_expand.weight:[213, 28, 1, 1]***blocks.4.3.se.conv_expand.bias:[213]***blocks.4.3.conv_pwl.weight:[67, 213, 1, 1]***blocks.4.3.bn3.weight:[67]***blocks.4.3.bn3.bias:[67]***blocks.4.3.bn3.running_mean:[67]***blocks.4.3.bn3.running_var:[67]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[637, 67, 1, 1]***blocks.5.0.bn1.weight:[637]***blocks.5.0.bn1.bias:[637]***blocks.5.0.bn1.running_mean:[637]***blocks.5.0.bn1.running_var:[637]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[637, 1, 5, 5]***blocks.5.0.bn2.weight:[637]***blocks.5.0.bn2.bias:[637]***blocks.5.0.bn2.running_mean:[637]***blocks.5.0.bn2.running_var:[637]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[27, 637, 1, 1]***blocks.5.0.se.conv_reduce.bias:[27]***blocks.5.0.se.conv_expand.weight:[637, 27, 1, 1]***blocks.5.0.se.conv_expand.bias:[637]***blocks.5.0.conv_pwl.weight:[192, 637, 1, 1]***blocks.5.0.bn3.weight:[192]***blocks.5.0.bn3.bias:[192]***blocks.5.0.bn3.running_mean:[192]***blocks.5.0.bn3.running_var:[192]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[806, 192, 1, 1]***blocks.5.1.bn1.weight:[806]***blocks.5.1.bn1.bias:[806]***blocks.5.1.bn1.running_mean:[806]***blocks.5.1.bn1.running_var:[806]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[806, 1, 5, 5]***blocks.5.1.bn2.weight:[806]***blocks.5.1.bn2.bias:[806]***blocks.5.1.bn2.running_mean:[806]***blocks.5.1.bn2.running_var:[806]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[48, 806, 1, 
1]***blocks.5.1.se.conv_reduce.bias:[48]***blocks.5.1.se.conv_expand.weight:[806, 48, 1, 1]***blocks.5.1.se.conv_expand.bias:[806]***blocks.5.1.conv_pwl.weight:[192, 806, 1, 1]***blocks.5.1.bn3.weight:[192]***blocks.5.1.bn3.bias:[192]***blocks.5.1.bn3.running_mean:[192]***blocks.5.1.bn3.running_var:[192]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[798, 192, 1, 1]***blocks.5.2.bn1.weight:[798]***blocks.5.2.bn1.bias:[798]***blocks.5.2.bn1.running_mean:[798]***blocks.5.2.bn1.running_var:[798]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[798, 1, 5, 5]***blocks.5.2.bn2.weight:[798]***blocks.5.2.bn2.bias:[798]***blocks.5.2.bn2.running_mean:[798]***blocks.5.2.bn2.running_var:[798]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[48, 798, 1, 1]***blocks.5.2.se.conv_reduce.bias:[48]***blocks.5.2.se.conv_expand.weight:[798, 48, 1, 1]***blocks.5.2.se.conv_expand.bias:[798]***blocks.5.2.conv_pwl.weight:[192, 798, 1, 1]***blocks.5.2.bn3.weight:[192]***blocks.5.2.bn3.bias:[192]***blocks.5.2.bn3.running_mean:[192]***blocks.5.2.bn3.running_var:[192]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[891, 192, 1, 1]***blocks.5.3.bn1.weight:[891]***blocks.5.3.bn1.bias:[891]***blocks.5.3.bn1.running_mean:[891]***blocks.5.3.bn1.running_var:[891]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[891, 1, 5, 5]***blocks.5.3.bn2.weight:[891]***blocks.5.3.bn2.bias:[891]***blocks.5.3.bn2.running_mean:[891]***blocks.5.3.bn2.running_var:[891]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[48, 891, 1, 1]***blocks.5.3.se.conv_reduce.bias:[48]***blocks.5.3.se.conv_expand.weight:[891, 48, 1, 1]***blocks.5.3.se.conv_expand.bias:[891]***blocks.5.3.conv_pwl.weight:[192, 891, 1, 1]***blocks.5.3.bn3.weight:[192]***blocks.5.3.bn3.bias:[192]***blocks.5.3.bn3.running_mean:[192]***blocks.5.3.bn3.running_var:[192]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[990, 192, 1, 1]***blocks.5.4.bn1.weight:[990]***blocks.5.4.bn1.bias:[990]***blocks.5.4.bn1.running_mean:[990]***blocks.5.4.bn1.running_var:[990]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[990, 1, 5, 5]***blocks.5.4.bn2.weight:[990]***blocks.5.4.bn2.bias:[990]***blocks.5.4.bn2.running_mean:[990]***blocks.5.4.bn2.running_var:[990]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[48, 990, 1, 1]***blocks.5.4.se.conv_reduce.bias:[48]***blocks.5.4.se.conv_expand.weight:[990, 48, 1, 1]***blocks.5.4.se.conv_expand.bias:[990]***blocks.5.4.conv_pwl.weight:[192, 990, 1, 1]***blocks.5.4.bn3.weight:[192]***blocks.5.4.bn3.bias:[192]***blocks.5.4.bn3.running_mean:[192]***blocks.5.4.bn3.running_var:[192]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1152, 192, 1, 1]***blocks.6.0.bn1.weight:[1152]***blocks.6.0.bn1.bias:[1152]***blocks.6.0.bn1.running_mean:[1152]***blocks.6.0.bn1.running_var:[1152]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1152, 1, 3, 3]***blocks.6.0.bn2.weight:[1152]***blocks.6.0.bn2.bias:[1152]***blocks.6.0.bn2.running_mean:[1152]***blocks.6.0.bn2.running_var:[1152]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[48, 1152, 1, 1]***blocks.6.0.se.conv_reduce.bias:[48]***blocks.6.0.se.conv_expand.weight:[1152, 48, 1, 1]***blocks.6.0.se.conv_expand.bias:[1152]***blocks.6.0.conv_pwl.weight:[320, 1152, 1, 
1]***blocks.6.0.bn3.weight:[320]***blocks.6.0.bn3.bias:[320]***blocks.6.0.bn3.running_mean:[320]***blocks.6.0.bn3.running_var:[320]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[1912, 320, 1, 1]***blocks.6.1.bn1.weight:[1912]***blocks.6.1.bn1.bias:[1912]***blocks.6.1.bn1.running_mean:[1912]***blocks.6.1.bn1.running_var:[1912]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[1912, 1, 3, 3]***blocks.6.1.bn2.weight:[1912]***blocks.6.1.bn2.bias:[1912]***blocks.6.1.bn2.running_mean:[1912]***blocks.6.1.bn2.running_var:[1912]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[80, 1912, 1, 1]***blocks.6.1.se.conv_reduce.bias:[80]***blocks.6.1.se.conv_expand.weight:[1912, 80, 1, 1]***blocks.6.1.se.conv_expand.bias:[1912]***blocks.6.1.conv_pwl.weight:[320, 1912, 1, 1]***blocks.6.1.bn3.weight:[320]***blocks.6.1.bn3.bias:[320]***blocks.6.1.bn3.running_mean:[320]***blocks.6.1.bn3.running_var:[320]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1280, 320, 1, 1]***bn2.weight:[1280]***bn2.bias:[1280]***bn2.running_mean:[1280]***bn2.running_var:[1280]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1280]***classifier.bias:[1000]
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/efficientnet_b2_pruned.txt
conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[54, 16, 1, 1]***blocks.1.0.bn1.weight:[54]***blocks.1.0.bn1.bias:[54]***blocks.1.0.bn1.running_mean:[54]***blocks.1.0.bn1.running_var:[54]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[54, 1, 3, 3]***blocks.1.0.bn2.weight:[54]***blocks.1.0.bn2.bias:[54]***blocks.1.0.bn2.running_mean:[54]***blocks.1.0.bn2.running_var:[54]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 54, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[54, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[54]***blocks.1.0.conv_pwl.weight:[17, 54, 1, 1]***blocks.1.0.bn3.weight:[17]***blocks.1.0.bn3.bias:[17]***blocks.1.0.bn3.running_mean:[17]***blocks.1.0.bn3.running_var:[17]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[69, 17, 1, 1]***blocks.1.1.bn1.weight:[69]***blocks.1.1.bn1.bias:[69]***blocks.1.1.bn1.running_mean:[69]***blocks.1.1.bn1.running_var:[69]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[69, 1, 3, 3]***blocks.1.1.bn2.weight:[69]***blocks.1.1.bn2.bias:[69]***blocks.1.1.bn2.running_mean:[69]***blocks.1.1.bn2.running_var:[69]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 69, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[69, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[69]***blocks.1.1.conv_pwl.weight:[17, 69, 1, 1]***blocks.1.1.bn3.weight:[17]***blocks.1.1.bn3.bias:[17]***blocks.1.1.bn3.running_mean:[17]***blocks.1.1.bn3.running_var:[17]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[61, 17, 1, 1]***blocks.1.2.bn1.weight:[61]***blocks.1.2.bn1.bias:[61]***blocks.1.2.bn1.running_mean:[61]***blocks.1.2.bn1.running_var:[61]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[61, 1, 3, 3]***blocks.1.2.bn2.weight:[61]***blocks.1.2.bn2.bias:[61]***blocks.1.2.bn2.running_mean:[61]***blocks.1.2.bn2.running_var:[61]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 61, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[61, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[61]***blocks.1.2.conv_pwl.weight:[17, 61, 1, 
1]***blocks.1.2.bn3.weight:[17]***blocks.1.2.bn3.bias:[17]***blocks.1.2.bn3.running_mean:[17]***blocks.1.2.bn3.running_var:[17]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[86, 17, 1, 1]***blocks.2.0.bn1.weight:[86]***blocks.2.0.bn1.bias:[86]***blocks.2.0.bn1.running_mean:[86]***blocks.2.0.bn1.running_var:[86]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[86, 1, 5, 5]***blocks.2.0.bn2.weight:[86]***blocks.2.0.bn2.bias:[86]***blocks.2.0.bn2.running_mean:[86]***blocks.2.0.bn2.running_var:[86]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 86, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[86, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[86]***blocks.2.0.conv_pwl.weight:[42, 86, 1, 1]***blocks.2.0.bn3.weight:[42]***blocks.2.0.bn3.bias:[42]***blocks.2.0.bn3.running_mean:[42]***blocks.2.0.bn3.running_var:[42]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[72, 42, 1, 1]***blocks.2.1.bn1.weight:[72]***blocks.2.1.bn1.bias:[72]***blocks.2.1.bn1.running_mean:[72]***blocks.2.1.bn1.running_var:[72]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[72, 1, 5, 5]***blocks.2.1.bn2.weight:[72]***blocks.2.1.bn2.bias:[72]***blocks.2.1.bn2.running_mean:[72]***blocks.2.1.bn2.running_var:[72]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 72, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[72, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[72]***blocks.2.1.conv_pwl.weight:[42, 72, 1, 1]***blocks.2.1.bn3.weight:[42]***blocks.2.1.bn3.bias:[42]***blocks.2.1.bn3.running_mean:[42]***blocks.2.1.bn3.running_var:[42]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[98, 42, 1, 1]***blocks.2.2.bn1.weight:[98]***blocks.2.2.bn1.bias:[98]***blocks.2.2.bn1.running_mean:[98]***blocks.2.2.bn1.running_var:[98]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[98, 1, 5, 5]***blocks.2.2.bn2.weight:[98]***blocks.2.2.bn2.bias:[98]***blocks.2.2.bn2.running_mean:[98]***blocks.2.2.bn2.running_var:[98]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 98, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[98, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[98]***blocks.2.2.conv_pwl.weight:[42, 98, 1, 1]***blocks.2.2.bn3.weight:[42]***blocks.2.2.bn3.bias:[42]***blocks.2.2.bn3.running_mean:[42]***blocks.2.2.bn3.running_var:[42]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[245, 42, 1, 1]***blocks.3.0.bn1.weight:[245]***blocks.3.0.bn1.bias:[245]***blocks.3.0.bn1.running_mean:[245]***blocks.3.0.bn1.running_var:[245]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[245, 1, 3, 3]***blocks.3.0.bn2.weight:[245]***blocks.3.0.bn2.bias:[245]***blocks.3.0.bn2.running_mean:[245]***blocks.3.0.bn2.running_var:[245]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 245, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[245, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[245]***blocks.3.0.conv_pwl.weight:[85, 245, 1, 1]***blocks.3.0.bn3.weight:[85]***blocks.3.0.bn3.bias:[85]***blocks.3.0.bn3.running_mean:[85]***blocks.3.0.bn3.running_var:[85]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[274, 85, 1, 
1]***blocks.3.1.bn1.weight:[274]***blocks.3.1.bn1.bias:[274]***blocks.3.1.bn1.running_mean:[274]***blocks.3.1.bn1.running_var:[274]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[274, 1, 3, 3]***blocks.3.1.bn2.weight:[274]***blocks.3.1.bn2.bias:[274]***blocks.3.1.bn2.running_mean:[274]***blocks.3.1.bn2.running_var:[274]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[22, 274, 1, 1]***blocks.3.1.se.conv_reduce.bias:[22]***blocks.3.1.se.conv_expand.weight:[274, 22, 1, 1]***blocks.3.1.se.conv_expand.bias:[274]***blocks.3.1.conv_pwl.weight:[85, 274, 1, 1]***blocks.3.1.bn3.weight:[85]***blocks.3.1.bn3.bias:[85]***blocks.3.1.bn3.running_mean:[85]***blocks.3.1.bn3.running_var:[85]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[254, 85, 1, 1]***blocks.3.2.bn1.weight:[254]***blocks.3.2.bn1.bias:[254]***blocks.3.2.bn1.running_mean:[254]***blocks.3.2.bn1.running_var:[254]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[254, 1, 3, 3]***blocks.3.2.bn2.weight:[254]***blocks.3.2.bn2.bias:[254]***blocks.3.2.bn2.running_mean:[254]***blocks.3.2.bn2.running_var:[254]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[22, 254, 1, 1]***blocks.3.2.se.conv_reduce.bias:[22]***blocks.3.2.se.conv_expand.weight:[254, 22, 1, 1]***blocks.3.2.se.conv_expand.bias:[254]***blocks.3.2.conv_pwl.weight:[85, 254, 1, 1]***blocks.3.2.bn3.weight:[85]***blocks.3.2.bn3.bias:[85]***blocks.3.2.bn3.running_mean:[85]***blocks.3.2.bn3.running_var:[85]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[292, 85, 1, 1]***blocks.3.3.bn1.weight:[292]***blocks.3.3.bn1.bias:[292]***blocks.3.3.bn1.running_mean:[292]***blocks.3.3.bn1.running_var:[292]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[292, 1, 3, 3]***blocks.3.3.bn2.weight:[292]***blocks.3.3.bn2.bias:[292]***blocks.3.3.bn2.running_mean:[292]***blocks.3.3.bn2.running_var:[292]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[22, 292, 1, 1]***blocks.3.3.se.conv_reduce.bias:[22]***blocks.3.3.se.conv_expand.weight:[292, 22, 1, 1]***blocks.3.3.se.conv_expand.bias:[292]***blocks.3.3.conv_pwl.weight:[85, 292, 1, 1]***blocks.3.3.bn3.weight:[85]***blocks.3.3.bn3.bias:[85]***blocks.3.3.bn3.running_mean:[85]***blocks.3.3.bn3.running_var:[85]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[502, 85, 1, 1]***blocks.4.0.bn1.weight:[502]***blocks.4.0.bn1.bias:[502]***blocks.4.0.bn1.running_mean:[502]***blocks.4.0.bn1.running_var:[502]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[502, 1, 5, 5]***blocks.4.0.bn2.weight:[502]***blocks.4.0.bn2.bias:[502]***blocks.4.0.bn2.running_mean:[502]***blocks.4.0.bn2.running_var:[502]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[22, 502, 1, 1]***blocks.4.0.se.conv_reduce.bias:[22]***blocks.4.0.se.conv_expand.weight:[502, 22, 1, 1]***blocks.4.0.se.conv_expand.bias:[502]***blocks.4.0.conv_pwl.weight:[116, 502, 1, 1]***blocks.4.0.bn3.weight:[116]***blocks.4.0.bn3.bias:[116]***blocks.4.0.bn3.running_mean:[116]***blocks.4.0.bn3.running_var:[116]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[315, 116, 1, 1]***blocks.4.1.bn1.weight:[315]***blocks.4.1.bn1.bias:[315]***blocks.4.1.bn1.running_mean:[315]***blocks.4.1.bn1.running_var:[315]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[315, 1, 5, 
5]***blocks.4.1.bn2.weight:[315]***blocks.4.1.bn2.bias:[315]***blocks.4.1.bn2.running_mean:[315]***blocks.4.1.bn2.running_var:[315]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[30, 315, 1, 1]***blocks.4.1.se.conv_reduce.bias:[30]***blocks.4.1.se.conv_expand.weight:[315, 30, 1, 1]***blocks.4.1.se.conv_expand.bias:[315]***blocks.4.1.conv_pwl.weight:[116, 315, 1, 1]***blocks.4.1.bn3.weight:[116]***blocks.4.1.bn3.bias:[116]***blocks.4.1.bn3.running_mean:[116]***blocks.4.1.bn3.running_var:[116]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[354, 116, 1, 1]***blocks.4.2.bn1.weight:[354]***blocks.4.2.bn1.bias:[354]***blocks.4.2.bn1.running_mean:[354]***blocks.4.2.bn1.running_var:[354]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[354, 1, 5, 5]***blocks.4.2.bn2.weight:[354]***blocks.4.2.bn2.bias:[354]***blocks.4.2.bn2.running_mean:[354]***blocks.4.2.bn2.running_var:[354]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[30, 354, 1, 1]***blocks.4.2.se.conv_reduce.bias:[30]***blocks.4.2.se.conv_expand.weight:[354, 30, 1, 1]***blocks.4.2.se.conv_expand.bias:[354]***blocks.4.2.conv_pwl.weight:[116, 354, 1, 1]***blocks.4.2.bn3.weight:[116]***blocks.4.2.bn3.bias:[116]***blocks.4.2.bn3.running_mean:[116]***blocks.4.2.bn3.running_var:[116]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[443, 116, 1, 1]***blocks.4.3.bn1.weight:[443]***blocks.4.3.bn1.bias:[443]***blocks.4.3.bn1.running_mean:[443]***blocks.4.3.bn1.running_var:[443]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[443, 1, 5, 5]***blocks.4.3.bn2.weight:[443]***blocks.4.3.bn2.bias:[443]***blocks.4.3.bn2.running_mean:[443]***blocks.4.3.bn2.running_var:[443]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[30, 443, 1, 1]***blocks.4.3.se.conv_reduce.bias:[30]***blocks.4.3.se.conv_expand.weight:[443, 30, 1, 1]***blocks.4.3.se.conv_expand.bias:[443]***blocks.4.3.conv_pwl.weight:[116, 443, 1, 1]***blocks.4.3.bn3.weight:[116]***blocks.4.3.bn3.bias:[116]***blocks.4.3.bn3.running_mean:[116]***blocks.4.3.bn3.running_var:[116]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[719, 116, 1, 1]***blocks.5.0.bn1.weight:[719]***blocks.5.0.bn1.bias:[719]***blocks.5.0.bn1.running_mean:[719]***blocks.5.0.bn1.running_var:[719]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[719, 1, 5, 5]***blocks.5.0.bn2.weight:[719]***blocks.5.0.bn2.bias:[719]***blocks.5.0.bn2.running_mean:[719]***blocks.5.0.bn2.running_var:[719]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[30, 719, 1, 1]***blocks.5.0.se.conv_reduce.bias:[30]***blocks.5.0.se.conv_expand.weight:[719, 30, 1, 1]***blocks.5.0.se.conv_expand.bias:[719]***blocks.5.0.conv_pwl.weight:[208, 719, 1, 1]***blocks.5.0.bn3.weight:[208]***blocks.5.0.bn3.bias:[208]***blocks.5.0.bn3.running_mean:[208]***blocks.5.0.bn3.running_var:[208]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1148, 208, 1, 1]***blocks.5.1.bn1.weight:[1148]***blocks.5.1.bn1.bias:[1148]***blocks.5.1.bn1.running_mean:[1148]***blocks.5.1.bn1.running_var:[1148]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1148, 1, 5, 5]***blocks.5.1.bn2.weight:[1148]***blocks.5.1.bn2.bias:[1148]***blocks.5.1.bn2.running_mean:[1148]***blocks.5.1.bn2.running_var:[1148]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[52, 1148, 1, 
1]***blocks.5.1.se.conv_reduce.bias:[52]***blocks.5.1.se.conv_expand.weight:[1148, 52, 1, 1]***blocks.5.1.se.conv_expand.bias:[1148]***blocks.5.1.conv_pwl.weight:[208, 1148, 1, 1]***blocks.5.1.bn3.weight:[208]***blocks.5.1.bn3.bias:[208]***blocks.5.1.bn3.running_mean:[208]***blocks.5.1.bn3.running_var:[208]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[1160, 208, 1, 1]***blocks.5.2.bn1.weight:[1160]***blocks.5.2.bn1.bias:[1160]***blocks.5.2.bn1.running_mean:[1160]***blocks.5.2.bn1.running_var:[1160]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[1160, 1, 5, 5]***blocks.5.2.bn2.weight:[1160]***blocks.5.2.bn2.bias:[1160]***blocks.5.2.bn2.running_mean:[1160]***blocks.5.2.bn2.running_var:[1160]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[52, 1160, 1, 1]***blocks.5.2.se.conv_reduce.bias:[52]***blocks.5.2.se.conv_expand.weight:[1160, 52, 1, 1]***blocks.5.2.se.conv_expand.bias:[1160]***blocks.5.2.conv_pwl.weight:[208, 1160, 1, 1]***blocks.5.2.bn3.weight:[208]***blocks.5.2.bn3.bias:[208]***blocks.5.2.bn3.running_mean:[208]***blocks.5.2.bn3.running_var:[208]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1182, 208, 1, 1]***blocks.5.3.bn1.weight:[1182]***blocks.5.3.bn1.bias:[1182]***blocks.5.3.bn1.running_mean:[1182]***blocks.5.3.bn1.running_var:[1182]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1182, 1, 5, 5]***blocks.5.3.bn2.weight:[1182]***blocks.5.3.bn2.bias:[1182]***blocks.5.3.bn2.running_mean:[1182]***blocks.5.3.bn2.running_var:[1182]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[52, 1182, 1, 1]***blocks.5.3.se.conv_reduce.bias:[52]***blocks.5.3.se.conv_expand.weight:[1182, 52, 1, 1]***blocks.5.3.se.conv_expand.bias:[1182]***blocks.5.3.conv_pwl.weight:[208, 1182, 1, 1]***blocks.5.3.bn3.weight:[208]***blocks.5.3.bn3.bias:[208]***blocks.5.3.bn3.running_mean:[208]***blocks.5.3.bn3.running_var:[208]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1228, 208, 1, 1]***blocks.5.4.bn1.weight:[1228]***blocks.5.4.bn1.bias:[1228]***blocks.5.4.bn1.running_mean:[1228]***blocks.5.4.bn1.running_var:[1228]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1228, 1, 5, 5]***blocks.5.4.bn2.weight:[1228]***blocks.5.4.bn2.bias:[1228]***blocks.5.4.bn2.running_mean:[1228]***blocks.5.4.bn2.running_var:[1228]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[52, 1228, 1, 1]***blocks.5.4.se.conv_reduce.bias:[52]***blocks.5.4.se.conv_expand.weight:[1228, 52, 1, 1]***blocks.5.4.se.conv_expand.bias:[1228]***blocks.5.4.conv_pwl.weight:[208, 1228, 1, 1]***blocks.5.4.bn3.weight:[208]***blocks.5.4.bn3.bias:[208]***blocks.5.4.bn3.running_mean:[208]***blocks.5.4.bn3.running_var:[208]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1248, 208, 1, 1]***blocks.6.0.bn1.weight:[1248]***blocks.6.0.bn1.bias:[1248]***blocks.6.0.bn1.running_mean:[1248]***blocks.6.0.bn1.running_var:[1248]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1248, 1, 3, 3]***blocks.6.0.bn2.weight:[1248]***blocks.6.0.bn2.bias:[1248]***blocks.6.0.bn2.running_mean:[1248]***blocks.6.0.bn2.running_var:[1248]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[52, 1248, 1, 1]***blocks.6.0.se.conv_reduce.bias:[52]***blocks.6.0.se.conv_expand.weight:[1248, 52, 1, 1]***blocks.6.0.se.conv_expand.bias:[1248]***blocks.6.0.conv_pwl.weight:[352, 1248, 1, 
1]***blocks.6.0.bn3.weight:[352]***blocks.6.0.bn3.bias:[352]***blocks.6.0.bn3.running_mean:[352]***blocks.6.0.bn3.running_var:[352]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2112, 352, 1, 1]***blocks.6.1.bn1.weight:[2112]***blocks.6.1.bn1.bias:[2112]***blocks.6.1.bn1.running_mean:[2112]***blocks.6.1.bn1.running_var:[2112]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2112, 1, 3, 3]***blocks.6.1.bn2.weight:[2112]***blocks.6.1.bn2.bias:[2112]***blocks.6.1.bn2.running_mean:[2112]***blocks.6.1.bn2.running_var:[2112]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[88, 2112, 1, 1]***blocks.6.1.se.conv_reduce.bias:[88]***blocks.6.1.se.conv_expand.weight:[2112, 88, 1, 1]***blocks.6.1.se.conv_expand.bias:[2112]***blocks.6.1.conv_pwl.weight:[352, 2112, 1, 1]***blocks.6.1.bn3.weight:[352]***blocks.6.1.bn3.bias:[352]***blocks.6.1.bn3.running_mean:[352]***blocks.6.1.bn3.running_var:[352]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1408, 352, 1, 1]***bn2.weight:[1408]***bn2.bias:[1408]***bn2.running_mean:[1408]***bn2.running_var:[1408]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1408]***classifier.bias:[1000]
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/efficientnet_b3_pruned.txt
conv_stem.weight:[40, 3, 3, 3]***bn1.weight:[40]***bn1.bias:[40]***bn1.running_mean:[40]***bn1.running_var:[40]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[40, 1, 3, 3]***blocks.0.0.bn1.weight:[40]***blocks.0.0.bn1.bias:[40]***blocks.0.0.bn1.running_mean:[40]***blocks.0.0.bn1.running_var:[40]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[10, 40, 1, 1]***blocks.0.0.se.conv_reduce.bias:[10]***blocks.0.0.se.conv_expand.weight:[40, 10, 1, 1]***blocks.0.0.se.conv_expand.bias:[40]***blocks.0.0.conv_pw.weight:[24, 40, 1, 1]***blocks.0.0.bn2.weight:[24]***blocks.0.0.bn2.bias:[24]***blocks.0.0.bn2.running_mean:[24]***blocks.0.0.bn2.running_var:[24]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[24, 1, 3, 3]***blocks.0.1.bn1.weight:[24]***blocks.0.1.bn1.bias:[24]***blocks.0.1.bn1.running_mean:[24]***blocks.0.1.bn1.running_var:[24]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[6, 24, 1, 1]***blocks.0.1.se.conv_reduce.bias:[6]***blocks.0.1.se.conv_expand.weight:[24, 6, 1, 1]***blocks.0.1.se.conv_expand.bias:[24]***blocks.0.1.conv_pw.weight:[24, 24, 1, 1]***blocks.0.1.bn2.weight:[24]***blocks.0.1.bn2.bias:[24]***blocks.0.1.bn2.running_mean:[24]***blocks.0.1.bn2.running_var:[24]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[27, 24, 1, 1]***blocks.1.0.bn1.weight:[27]***blocks.1.0.bn1.bias:[27]***blocks.1.0.bn1.running_mean:[27]***blocks.1.0.bn1.running_var:[27]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[27, 1, 3, 3]***blocks.1.0.bn2.weight:[27]***blocks.1.0.bn2.bias:[27]***blocks.1.0.bn2.running_mean:[27]***blocks.1.0.bn2.running_var:[27]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[6, 27, 1, 1]***blocks.1.0.se.conv_reduce.bias:[6]***blocks.1.0.se.conv_expand.weight:[27, 6, 1, 1]***blocks.1.0.se.conv_expand.bias:[27]***blocks.1.0.conv_pwl.weight:[12, 27, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[49, 12, 1, 1]***blocks.1.1.bn1.weight:[49]***blocks.1.1.bn1.bias:[49]***blocks.1.1.bn1.running_mean:[49]***blocks.1.1.bn1.running_var:[49]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[49, 1, 3, 3]***blocks.1.1.bn2.weight:[49]***blocks.1.1.bn2.bias:[49]***blocks.1.1.bn2.running_mean:[49]***blocks.1.1.bn2.running_var:[49]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[8, 49, 1, 1]***blocks.1.1.se.conv_reduce.bias:[8]***blocks.1.1.se.conv_expand.weight:[49, 8, 1, 1]***blocks.1.1.se.conv_expand.bias:[49]***blocks.1.1.conv_pwl.weight:[12, 49, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[8, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[8]***blocks.1.2.se.conv_expand.weight:[48, 8, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 
1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[83, 12, 1, 1]***blocks.2.0.bn1.weight:[83]***blocks.2.0.bn1.bias:[83]***blocks.2.0.bn1.running_mean:[83]***blocks.2.0.bn1.running_var:[83]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[83, 1, 5, 5]***blocks.2.0.bn2.weight:[83]***blocks.2.0.bn2.bias:[83]***blocks.2.0.bn2.running_mean:[83]***blocks.2.0.bn2.running_var:[83]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[8, 83, 1, 1]***blocks.2.0.se.conv_reduce.bias:[8]***blocks.2.0.se.conv_expand.weight:[83, 8, 1, 1]***blocks.2.0.se.conv_expand.bias:[83]***blocks.2.0.conv_pwl.weight:[40, 83, 1, 1]***blocks.2.0.bn3.weight:[40]***blocks.2.0.bn3.bias:[40]***blocks.2.0.bn3.running_mean:[40]***blocks.2.0.bn3.running_var:[40]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[90, 40, 1, 1]***blocks.2.1.bn1.weight:[90]***blocks.2.1.bn1.bias:[90]***blocks.2.1.bn1.running_mean:[90]***blocks.2.1.bn1.running_var:[90]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[90, 1, 5, 5]***blocks.2.1.bn2.weight:[90]***blocks.2.1.bn2.bias:[90]***blocks.2.1.bn2.running_mean:[90]***blocks.2.1.bn2.running_var:[90]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 90, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[90, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[90]***blocks.2.1.conv_pwl.weight:[40, 90, 1, 1]***blocks.2.1.bn3.weight:[40]***blocks.2.1.bn3.bias:[40]***blocks.2.1.bn3.running_mean:[40]***blocks.2.1.bn3.running_var:[40]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[85, 40, 1, 1]***blocks.2.2.bn1.weight:[85]***blocks.2.2.bn1.bias:[85]***blocks.2.2.bn1.running_mean:[85]***blocks.2.2.bn1.running_var:[85]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[85, 1, 5, 5]***blocks.2.2.bn2.weight:[85]***blocks.2.2.bn2.bias:[85]***blocks.2.2.bn2.running_mean:[85]***blocks.2.2.bn2.running_var:[85]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 85, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[85, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[85]***blocks.2.2.conv_pwl.weight:[40, 85, 1, 1]***blocks.2.2.bn3.weight:[40]***blocks.2.2.bn3.bias:[40]***blocks.2.2.bn3.running_mean:[40]***blocks.2.2.bn3.running_var:[40]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[215, 40, 1, 1]***blocks.3.0.bn1.weight:[215]***blocks.3.0.bn1.bias:[215]***blocks.3.0.bn1.running_mean:[215]***blocks.3.0.bn1.running_var:[215]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[215, 1, 3, 3]***blocks.3.0.bn2.weight:[215]***blocks.3.0.bn2.bias:[215]***blocks.3.0.bn2.running_mean:[215]***blocks.3.0.bn2.running_var:[215]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 215, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[215, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[215]***blocks.3.0.conv_pwl.weight:[93, 215, 1, 1]***blocks.3.0.bn3.weight:[93]***blocks.3.0.bn3.bias:[93]***blocks.3.0.bn3.running_mean:[93]***blocks.3.0.bn3.running_var:[93]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[261, 93, 1, 
1]***blocks.3.1.bn1.weight:[261]***blocks.3.1.bn1.bias:[261]***blocks.3.1.bn1.running_mean:[261]***blocks.3.1.bn1.running_var:[261]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[261, 1, 3, 3]***blocks.3.1.bn2.weight:[261]***blocks.3.1.bn2.bias:[261]***blocks.3.1.bn2.running_mean:[261]***blocks.3.1.bn2.running_var:[261]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[24, 261, 1, 1]***blocks.3.1.se.conv_reduce.bias:[24]***blocks.3.1.se.conv_expand.weight:[261, 24, 1, 1]***blocks.3.1.se.conv_expand.bias:[261]***blocks.3.1.conv_pwl.weight:[93, 261, 1, 1]***blocks.3.1.bn3.weight:[93]***blocks.3.1.bn3.bias:[93]***blocks.3.1.bn3.running_mean:[93]***blocks.3.1.bn3.running_var:[93]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[219, 93, 1, 1]***blocks.3.2.bn1.weight:[219]***blocks.3.2.bn1.bias:[219]***blocks.3.2.bn1.running_mean:[219]***blocks.3.2.bn1.running_var:[219]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[219, 1, 3, 3]***blocks.3.2.bn2.weight:[219]***blocks.3.2.bn2.bias:[219]***blocks.3.2.bn2.running_mean:[219]***blocks.3.2.bn2.running_var:[219]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[24, 219, 1, 1]***blocks.3.2.se.conv_reduce.bias:[24]***blocks.3.2.se.conv_expand.weight:[219, 24, 1, 1]***blocks.3.2.se.conv_expand.bias:[219]***blocks.3.2.conv_pwl.weight:[93, 219, 1, 1]***blocks.3.2.bn3.weight:[93]***blocks.3.2.bn3.bias:[93]***blocks.3.2.bn3.running_mean:[93]***blocks.3.2.bn3.running_var:[93]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[254, 93, 1, 1]***blocks.3.3.bn1.weight:[254]***blocks.3.3.bn1.bias:[254]***blocks.3.3.bn1.running_mean:[254]***blocks.3.3.bn1.running_var:[254]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[254, 1, 3, 3]***blocks.3.3.bn2.weight:[254]***blocks.3.3.bn2.bias:[254]***blocks.3.3.bn2.running_mean:[254]***blocks.3.3.bn2.running_var:[254]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[24, 254, 1, 1]***blocks.3.3.se.conv_reduce.bias:[24]***blocks.3.3.se.conv_expand.weight:[254, 24, 1, 1]***blocks.3.3.se.conv_expand.bias:[254]***blocks.3.3.conv_pwl.weight:[93, 254, 1, 1]***blocks.3.3.bn3.weight:[93]***blocks.3.3.bn3.bias:[93]***blocks.3.3.bn3.running_mean:[93]***blocks.3.3.bn3.running_var:[93]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.3.4.conv_pw.weight:[236, 93, 1, 1]***blocks.3.4.bn1.weight:[236]***blocks.3.4.bn1.bias:[236]***blocks.3.4.bn1.running_mean:[236]***blocks.3.4.bn1.running_var:[236]***blocks.3.4.bn1.num_batches_tracked:[]***blocks.3.4.conv_dw.weight:[236, 1, 3, 3]***blocks.3.4.bn2.weight:[236]***blocks.3.4.bn2.bias:[236]***blocks.3.4.bn2.running_mean:[236]***blocks.3.4.bn2.running_var:[236]***blocks.3.4.bn2.num_batches_tracked:[]***blocks.3.4.se.conv_reduce.weight:[24, 236, 1, 1]***blocks.3.4.se.conv_reduce.bias:[24]***blocks.3.4.se.conv_expand.weight:[236, 24, 1, 1]***blocks.3.4.se.conv_expand.bias:[236]***blocks.3.4.conv_pwl.weight:[93, 236, 1, 1]***blocks.3.4.bn3.weight:[93]***blocks.3.4.bn3.bias:[93]***blocks.3.4.bn3.running_mean:[93]***blocks.3.4.bn3.running_var:[93]***blocks.3.4.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[480, 93, 1, 1]***blocks.4.0.bn1.weight:[480]***blocks.4.0.bn1.bias:[480]***blocks.4.0.bn1.running_mean:[480]***blocks.4.0.bn1.running_var:[480]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[480, 1, 5, 
5]***blocks.4.0.bn2.weight:[480]***blocks.4.0.bn2.bias:[480]***blocks.4.0.bn2.running_mean:[480]***blocks.4.0.bn2.running_var:[480]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[24, 480, 1, 1]***blocks.4.0.se.conv_reduce.bias:[24]***blocks.4.0.se.conv_expand.weight:[480, 24, 1, 1]***blocks.4.0.se.conv_expand.bias:[480]***blocks.4.0.conv_pwl.weight:[120, 480, 1, 1]***blocks.4.0.bn3.weight:[120]***blocks.4.0.bn3.bias:[120]***blocks.4.0.bn3.running_mean:[120]***blocks.4.0.bn3.running_var:[120]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[235, 120, 1, 1]***blocks.4.1.bn1.weight:[235]***blocks.4.1.bn1.bias:[235]***blocks.4.1.bn1.running_mean:[235]***blocks.4.1.bn1.running_var:[235]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[235, 1, 5, 5]***blocks.4.1.bn2.weight:[235]***blocks.4.1.bn2.bias:[235]***blocks.4.1.bn2.running_mean:[235]***blocks.4.1.bn2.running_var:[235]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[34, 235, 1, 1]***blocks.4.1.se.conv_reduce.bias:[34]***blocks.4.1.se.conv_expand.weight:[235, 34, 1, 1]***blocks.4.1.se.conv_expand.bias:[235]***blocks.4.1.conv_pwl.weight:[120, 235, 1, 1]***blocks.4.1.bn3.weight:[120]***blocks.4.1.bn3.bias:[120]***blocks.4.1.bn3.running_mean:[120]***blocks.4.1.bn3.running_var:[120]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[217, 120, 1, 1]***blocks.4.2.bn1.weight:[217]***blocks.4.2.bn1.bias:[217]***blocks.4.2.bn1.running_mean:[217]***blocks.4.2.bn1.running_var:[217]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[217, 1, 5, 5]***blocks.4.2.bn2.weight:[217]***blocks.4.2.bn2.bias:[217]***blocks.4.2.bn2.running_mean:[217]***blocks.4.2.bn2.running_var:[217]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[34, 217, 1, 1]***blocks.4.2.se.conv_reduce.bias:[34]***blocks.4.2.se.conv_expand.weight:[217, 34, 1, 1]***blocks.4.2.se.conv_expand.bias:[217]***blocks.4.2.conv_pwl.weight:[120, 217, 1, 1]***blocks.4.2.bn3.weight:[120]***blocks.4.2.bn3.bias:[120]***blocks.4.2.bn3.running_mean:[120]***blocks.4.2.bn3.running_var:[120]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[226, 120, 1, 1]***blocks.4.3.bn1.weight:[226]***blocks.4.3.bn1.bias:[226]***blocks.4.3.bn1.running_mean:[226]***blocks.4.3.bn1.running_var:[226]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[226, 1, 5, 5]***blocks.4.3.bn2.weight:[226]***blocks.4.3.bn2.bias:[226]***blocks.4.3.bn2.running_mean:[226]***blocks.4.3.bn2.running_var:[226]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[33, 226, 1, 1]***blocks.4.3.se.conv_reduce.bias:[33]***blocks.4.3.se.conv_expand.weight:[226, 33, 1, 1]***blocks.4.3.se.conv_expand.bias:[226]***blocks.4.3.conv_pwl.weight:[120, 226, 1, 1]***blocks.4.3.bn3.weight:[120]***blocks.4.3.bn3.bias:[120]***blocks.4.3.bn3.running_mean:[120]***blocks.4.3.bn3.running_var:[120]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.4.4.conv_pw.weight:[340, 120, 1, 1]***blocks.4.4.bn1.weight:[340]***blocks.4.4.bn1.bias:[340]***blocks.4.4.bn1.running_mean:[340]***blocks.4.4.bn1.running_var:[340]***blocks.4.4.bn1.num_batches_tracked:[]***blocks.4.4.conv_dw.weight:[340, 1, 5, 5]***blocks.4.4.bn2.weight:[340]***blocks.4.4.bn2.bias:[340]***blocks.4.4.bn2.running_mean:[340]***blocks.4.4.bn2.running_var:[340]***blocks.4.4.bn2.num_batches_tracked:[]***blocks.4.4.se.conv_reduce.weight:[34, 340, 1, 
1]***blocks.4.4.se.conv_reduce.bias:[34]***blocks.4.4.se.conv_expand.weight:[340, 34, 1, 1]***blocks.4.4.se.conv_expand.bias:[340]***blocks.4.4.conv_pwl.weight:[120, 340, 1, 1]***blocks.4.4.bn3.weight:[120]***blocks.4.4.bn3.bias:[120]***blocks.4.4.bn3.running_mean:[120]***blocks.4.4.bn3.running_var:[120]***blocks.4.4.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[802, 120, 1, 1]***blocks.5.0.bn1.weight:[802]***blocks.5.0.bn1.bias:[802]***blocks.5.0.bn1.running_mean:[802]***blocks.5.0.bn1.running_var:[802]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[802, 1, 5, 5]***blocks.5.0.bn2.weight:[802]***blocks.5.0.bn2.bias:[802]***blocks.5.0.bn2.running_mean:[802]***blocks.5.0.bn2.running_var:[802]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[34, 802, 1, 1]***blocks.5.0.se.conv_reduce.bias:[34]***blocks.5.0.se.conv_expand.weight:[802, 34, 1, 1]***blocks.5.0.se.conv_expand.bias:[802]***blocks.5.0.conv_pwl.weight:[232, 802, 1, 1]***blocks.5.0.bn3.weight:[232]***blocks.5.0.bn3.bias:[232]***blocks.5.0.bn3.running_mean:[232]***blocks.5.0.bn3.running_var:[232]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1030, 232, 1, 1]***blocks.5.1.bn1.weight:[1030]***blocks.5.1.bn1.bias:[1030]***blocks.5.1.bn1.running_mean:[1030]***blocks.5.1.bn1.running_var:[1030]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1030, 1, 5, 5]***blocks.5.1.bn2.weight:[1030]***blocks.5.1.bn2.bias:[1030]***blocks.5.1.bn2.running_mean:[1030]***blocks.5.1.bn2.running_var:[1030]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[58, 1030, 1, 1]***blocks.5.1.se.conv_reduce.bias:[58]***blocks.5.1.se.conv_expand.weight:[1030, 58, 1, 1]***blocks.5.1.se.conv_expand.bias:[1030]***blocks.5.1.conv_pwl.weight:[232, 1030, 1, 1]***blocks.5.1.bn3.weight:[232]***blocks.5.1.bn3.bias:[232]***blocks.5.1.bn3.running_mean:[232]***blocks.5.1.bn3.running_var:[232]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[924, 232, 1, 1]***blocks.5.2.bn1.weight:[924]***blocks.5.2.bn1.bias:[924]***blocks.5.2.bn1.running_mean:[924]***blocks.5.2.bn1.running_var:[924]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[924, 1, 5, 5]***blocks.5.2.bn2.weight:[924]***blocks.5.2.bn2.bias:[924]***blocks.5.2.bn2.running_mean:[924]***blocks.5.2.bn2.running_var:[924]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[58, 924, 1, 1]***blocks.5.2.se.conv_reduce.bias:[58]***blocks.5.2.se.conv_expand.weight:[924, 58, 1, 1]***blocks.5.2.se.conv_expand.bias:[924]***blocks.5.2.conv_pwl.weight:[232, 924, 1, 1]***blocks.5.2.bn3.weight:[232]***blocks.5.2.bn3.bias:[232]***blocks.5.2.bn3.running_mean:[232]***blocks.5.2.bn3.running_var:[232]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1016, 232, 1, 1]***blocks.5.3.bn1.weight:[1016]***blocks.5.3.bn1.bias:[1016]***blocks.5.3.bn1.running_mean:[1016]***blocks.5.3.bn1.running_var:[1016]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1016, 1, 5, 5]***blocks.5.3.bn2.weight:[1016]***blocks.5.3.bn2.bias:[1016]***blocks.5.3.bn2.running_mean:[1016]***blocks.5.3.bn2.running_var:[1016]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[58, 1016, 1, 1]***blocks.5.3.se.conv_reduce.bias:[58]***blocks.5.3.se.conv_expand.weight:[1016, 58, 1, 1]***blocks.5.3.se.conv_expand.bias:[1016]***blocks.5.3.conv_pwl.weight:[232, 1016, 1, 
1]***blocks.5.3.bn3.weight:[232]***blocks.5.3.bn3.bias:[232]***blocks.5.3.bn3.running_mean:[232]***blocks.5.3.bn3.running_var:[232]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1130, 232, 1, 1]***blocks.5.4.bn1.weight:[1130]***blocks.5.4.bn1.bias:[1130]***blocks.5.4.bn1.running_mean:[1130]***blocks.5.4.bn1.running_var:[1130]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1130, 1, 5, 5]***blocks.5.4.bn2.weight:[1130]***blocks.5.4.bn2.bias:[1130]***blocks.5.4.bn2.running_mean:[1130]***blocks.5.4.bn2.running_var:[1130]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[58, 1130, 1, 1]***blocks.5.4.se.conv_reduce.bias:[58]***blocks.5.4.se.conv_expand.weight:[1130, 58, 1, 1]***blocks.5.4.se.conv_expand.bias:[1130]***blocks.5.4.conv_pwl.weight:[232, 1130, 1, 1]***blocks.5.4.bn3.weight:[232]***blocks.5.4.bn3.bias:[232]***blocks.5.4.bn3.running_mean:[232]***blocks.5.4.bn3.running_var:[232]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.5.5.conv_pw.weight:[1266, 232, 1, 1]***blocks.5.5.bn1.weight:[1266]***blocks.5.5.bn1.bias:[1266]***blocks.5.5.bn1.running_mean:[1266]***blocks.5.5.bn1.running_var:[1266]***blocks.5.5.bn1.num_batches_tracked:[]***blocks.5.5.conv_dw.weight:[1266, 1, 5, 5]***blocks.5.5.bn2.weight:[1266]***blocks.5.5.bn2.bias:[1266]***blocks.5.5.bn2.running_mean:[1266]***blocks.5.5.bn2.running_var:[1266]***blocks.5.5.bn2.num_batches_tracked:[]***blocks.5.5.se.conv_reduce.weight:[58, 1266, 1, 1]***blocks.5.5.se.conv_reduce.bias:[58]***blocks.5.5.se.conv_expand.weight:[1266, 58, 1, 1]***blocks.5.5.se.conv_expand.bias:[1266]***blocks.5.5.conv_pwl.weight:[232, 1266, 1, 1]***blocks.5.5.bn3.weight:[232]***blocks.5.5.bn3.bias:[232]***blocks.5.5.bn3.running_mean:[232]***blocks.5.5.bn3.running_var:[232]***blocks.5.5.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1392, 232, 1, 1]***blocks.6.0.bn1.weight:[1392]***blocks.6.0.bn1.bias:[1392]***blocks.6.0.bn1.running_mean:[1392]***blocks.6.0.bn1.running_var:[1392]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1392, 1, 3, 3]***blocks.6.0.bn2.weight:[1392]***blocks.6.0.bn2.bias:[1392]***blocks.6.0.bn2.running_mean:[1392]***blocks.6.0.bn2.running_var:[1392]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[58, 1392, 1, 1]***blocks.6.0.se.conv_reduce.bias:[58]***blocks.6.0.se.conv_expand.weight:[1392, 58, 1, 1]***blocks.6.0.se.conv_expand.bias:[1392]***blocks.6.0.conv_pwl.weight:[384, 1392, 1, 1]***blocks.6.0.bn3.weight:[384]***blocks.6.0.bn3.bias:[384]***blocks.6.0.bn3.running_mean:[384]***blocks.6.0.bn3.running_var:[384]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2301, 384, 1, 1]***blocks.6.1.bn1.weight:[2301]***blocks.6.1.bn1.bias:[2301]***blocks.6.1.bn1.running_mean:[2301]***blocks.6.1.bn1.running_var:[2301]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2301, 1, 3, 3]***blocks.6.1.bn2.weight:[2301]***blocks.6.1.bn2.bias:[2301]***blocks.6.1.bn2.running_mean:[2301]***blocks.6.1.bn2.running_var:[2301]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[96, 2301, 1, 1]***blocks.6.1.se.conv_reduce.bias:[96]***blocks.6.1.se.conv_expand.weight:[2301, 96, 1, 1]***blocks.6.1.se.conv_expand.bias:[2301]***blocks.6.1.conv_pwl.weight:[384, 2301, 1, 1]***blocks.6.1.bn3.weight:[384]***blocks.6.1.bn3.bias:[384]***blocks.6.1.bn3.running_mean:[384]***blocks.6.1.bn3.running_var:[384]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1536, 384, 1, 
1]***bn2.weight:[1536]***bn2.bias:[1536]***bn2.running_mean:[1536]***bn2.running_var:[1536]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1536]***classifier.bias:[1000]
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/layers/__init__.py
# NOTE timm.models.layers is DEPRECATED, please use timm.layers, this is here to reduce breakages in transition from timm.layers.activations import * from timm.layers.adaptive_avgmax_pool import \ adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d from timm.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding from timm.layers.blur_pool import BlurPool2d from timm.layers.classifier import ClassifierHead, create_classifier from timm.layers.cond_conv2d import CondConv2d, get_condconv_initializer from timm.layers.config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\ set_layer_config from timm.layers.conv2d_same import Conv2dSame, conv2d_same from timm.layers.conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct from timm.layers.create_act import create_act_layer, get_act_layer, get_act_fn from timm.layers.create_attn import get_attn, create_attn from timm.layers.create_conv2d import create_conv2d from timm.layers.create_norm import get_norm_layer, create_norm_layer from timm.layers.create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer from timm.layers.drop import DropBlock2d, DropPath, drop_block_2d, drop_path from timm.layers.eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn from timm.layers.evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\ EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a from timm.layers.fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm from timm.layers.filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d from timm.layers.gather_excite import GatherExcite from timm.layers.global_context import GlobalContext from timm.layers.helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple from timm.layers.inplace_abn import InplaceAbn from timm.layers.linear import Linear from timm.layers.mixed_conv2d import MixedConv2d from timm.layers.mlp import Mlp, GluMlp, GatedMlp, ConvMlp from timm.layers.non_local_attn import NonLocalAttn, BatNonLocalAttn from timm.layers.norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d from timm.layers.norm_act import BatchNormAct2d, GroupNormAct, convert_sync_batchnorm from timm.layers.padding import get_padding, get_same_padding, pad_same from timm.layers.patch_embed import PatchEmbed from timm.layers.pool2d_same import AvgPool2dSame, create_pool2d from timm.layers.squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite from timm.layers.selective_kernel import SelectiveKernel from timm.layers.separable_conv import SeparableConv2d, SeparableConvNormAct from timm.layers.space_to_depth import SpaceToDepthModule from timm.layers.split_attn import SplitAttn from timm.layers.split_batchnorm import SplitBatchNorm2d, convert_splitbn_model from timm.layers.std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame from timm.layers.test_time_pool import TestTimePoolHead, apply_test_time_pool from timm.layers.trace_utils import _assert, _float_to_int from timm.layers.weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_ import warnings warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.layers", DeprecationWarning)
0
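Migration note (not part of the dump itself): the deprecated timm.models.layers shim above only re-exports timm.layers, so new code can import from there directly. A tiny illustrative sketch:

# Preferred import path; the deprecated timm.models.layers module above simply forwards to these.
from timm.layers import SelectAdaptivePool2d, DropPath, trunc_normal_

pool = SelectAdaptivePool2d(pool_type='avg', flatten=True)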
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/__init__.py
from .adabelief import AdaBelief from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .adamw import AdamW from .adan import Adan from .lamb import Lamb from .lars import Lars from .lookahead import Lookahead from .madgrad import MADGRAD from .nadam import Nadam from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP from .lion import Lion from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs
0
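A hedged sketch of the typical entry point for the exports listed above; the opt/lr/weight_decay keyword names for create_optimizer_v2 are assumed from common timm usage and are not shown in this __init__ file.

import torch.nn as nn
from timm.optim import create_optimizer_v2

model = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 10))  # toy model (assumption)
# 'adamw' maps to the built-in torch AdamW; names such as 'adabelief', 'lamb' or 'lion' are assumed
# to select the implementations imported above.
optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)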
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/adabelief.py
import math import torch from torch.optim.optimizer import Optimizer class AdaBelief(Optimizer): r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-16) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) decoupled_decay (boolean, optional): (default: True) If set as True, then the optimizer uses decoupled weight decay as in AdamW fixed_decay (boolean, optional): (default: False) This is used when weight_decouple is set as True. When fixed_decay == True, the weight decay is performed as $W_{new} = W_{old} - W_{old} \times decay$. When fixed_decay == False, the weight decay is performed as $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the weight decay ratio decreases with learning rate (lr). rectify (boolean, optional): (default: True) If set as True, then perform the rectified update similar to RAdam degenerated_to_sgd (boolean, optional) (default:True) If set as True, then perform SGD update when variance of gradient is high reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020 For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer' For example train/args for EfficientNet see these gists - link to train_scipt: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037 - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3 """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False, decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict): for param in params: if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]): param['buffer'] = [[None, None, None] for _ in range(10)] defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify, fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)]) super(AdaBelief, self).__init__(params, defaults) def __setstate__(self, state): super(AdaBelief, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def reset(self): for group in self.param_groups: for p in group['params']: state = self.state[p] amsgrad = group['amsgrad'] # State initialization state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of 
squared gradient values state['exp_avg_var'] = torch.zeros_like(p) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_var'] = torch.zeros_like(p) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError( 'AdaBelief does not support sparse gradients, please consider SparseAdam instead') p_fp32 = p if p.dtype in {torch.float16, torch.bfloat16}: p_fp32 = p_fp32.float() amsgrad = group['amsgrad'] beta1, beta2 = group['betas'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p_fp32) # Exponential moving average of squared gradient values state['exp_avg_var'] = torch.zeros_like(p_fp32) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_var'] = torch.zeros_like(p_fp32) # perform weight decay, check if decoupled weight decay if group['decoupled_decay']: if not group['fixed_decay']: p_fp32.mul_(1.0 - group['lr'] * group['weight_decay']) else: p_fp32.mul_(1.0 - group['weight_decay']) else: if group['weight_decay'] != 0: grad.add_(p_fp32, alpha=group['weight_decay']) # get current state variable exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var'] state['step'] += 1 bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] # Update first and second moment running average exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) grad_residual = grad - exp_avg exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2) if amsgrad: max_exp_avg_var = state['max_exp_avg_var'] # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) # Use the max. for normalizing running avg. 
of gradient denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) else: denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) # update if not group['rectify']: # Default update step_size = group['lr'] / bias_correction1 p_fp32.addcdiv_(exp_avg, denom, value=-step_size) else: # Rectified update, forked from RAdam buffered = group['buffer'][int(state['step'] % 10)] if state['step'] == buffered[0]: num_sma, step_size = buffered[1], buffered[2] else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] num_sma_max = 2 / (1 - beta2) - 1 num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = num_sma # more conservative since it's an approximated value if num_sma >= 5: step_size = math.sqrt( (1 - beta2_t) * (num_sma - 4) / (num_sma_max - 4) * (num_sma - 2) / num_sma * num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) elif group['degenerated_to_sgd']: step_size = 1.0 / (1 - beta1 ** state['step']) else: step_size = -1 buffered[2] = step_size if num_sma >= 5: denom = exp_avg_var.sqrt().add_(group['eps']) p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) elif step_size > 0: p_fp32.add_(exp_avg, alpha=-step_size * group['lr']) if p.dtype in {torch.float16, torch.bfloat16}: p.copy_(p_fp32) return loss
0
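A minimal usage sketch for the AdaBelief class above; the toy model, dummy objective, and hyper-parameter values are illustrative assumptions.

import torch
import torch.nn as nn
from timm.optim import AdaBelief

model = nn.Linear(16, 4)    # toy model (assumption)
optimizer = AdaBelief(model.parameters(), lr=1e-3, eps=1e-16, weight_decay=1e-2, rectify=True)

loss = model(torch.randn(8, 16)).pow(2).mean()   # dummy objective (assumption)
loss.backward()
optimizer.step()            # decoupled weight decay plus the rectified update implemented above
optimizer.zero_grad()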
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/adafactor.py
""" Adafactor Optimizer Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py Original header/copyright below. """ # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import math class Adafactor(torch.optim.Optimizer): """Implements Adafactor algorithm. This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` (see https://arxiv.org/abs/1804.04235) Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step* and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): external learning rate (default: None) eps (tuple[float, float]): regularization constants for square gradient and parameter scale respectively (default: (1e-30, 1e-3)) clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) beta1 (float): coefficient used for computing running averages of gradient (default: None) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) warmup_init (bool): time-dependent learning rate computation depends on whether warm-up initialization is being used (default: False) """ def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0, decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False): relative_step = not lr if warmup_init and not relative_step: raise ValueError('warmup_init requires relative_step=True') beta1 = None if betas is None else betas[0] # make it compat with standard betas arg defaults = dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init) super(Adafactor, self).__init__(params, defaults) @staticmethod def _get_lr(param_group, param_state): if param_group['relative_step']: min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2 lr_t = min(min_step, 1.0 / math.sqrt(param_state['step'])) param_scale = 1.0 if param_group['scale_parameter']: param_scale = max(param_group['eps_scale'], param_state['RMS']) param_group['lr'] = lr_t * param_scale return param_group['lr'] @staticmethod def _get_options(param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group['beta1'] is not None return factored, use_first_moment @staticmethod def _rms(tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() return torch.mul(r_factor, c_factor) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError('Adafactor does not support sparse gradients.') state = self.state[p] factored, use_first_moment = self._get_options(group, grad.shape) # State Initialization if len(state) == 0: state['step'] = 0 if use_first_moment: # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(grad) if factored: state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad) state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad) else: state['exp_avg_sq'] = torch.zeros_like(grad) state['RMS'] = 0 else: if use_first_moment: state['exp_avg'] = state['exp_avg'].to(grad) if factored: state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) else: state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) p_fp32 = p if p.dtype in {torch.float16, torch.bfloat16}: p_fp32 = p_fp32.float() state['step'] += 1 state['RMS'] = self._rms(p_fp32) lr_t = self._get_lr(group, state) beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) update = grad ** 2 + group['eps'] if factored: exp_avg_sq_row = state['exp_avg_sq_row'] exp_avg_sq_col = state['exp_avg_sq_col'] exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state['exp_avg_sq'] exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) update = exp_avg_sq.rsqrt().mul_(grad) update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) update.mul_(lr_t) if use_first_moment: exp_avg = state['exp_avg'] exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) update = exp_avg if group['weight_decay'] != 0: p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t) p_fp32.add_(-update) if p.dtype in {torch.float16, torch.bfloat16}: p.copy_(p_fp32) return loss
0
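Illustrative sketch for the Adafactor class above, using the internal relative-step schedule that is enabled when no external lr is passed; the model is an assumption.

import torch.nn as nn
from timm.optim import Adafactor

model = nn.Linear(16, 4)    # toy model (assumption)
# lr=None enables the internal relative-step schedule; pass an explicit lr (with scale_parameter=False)
# to drive it from an external schedule instead, as the docstring above describes.
optimizer = Adafactor(model.parameters(), lr=None, weight_decay=0.0)
# training then follows the usual loss.backward(); optimizer.step(); optimizer.zero_grad() pattern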
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/adahessian.py
""" AdaHessian Optimizer Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py Originally licensed MIT, Copyright 2020, David Samuel """ import torch class Adahessian(torch.optim.Optimizer): """ Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 0.1) betas ((float, float), optional): coefficients used for computing running averages of gradient and the squared hessian trace (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0) hessian_power (float, optional): exponent of the hessian trace (default: 1.0) update_each (int, optional): compute the hessian trace approximation only after *this* number of steps (to save time) (default: 1) n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1) """ def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False): if not 0.0 <= lr: raise ValueError(f"Invalid learning rate: {lr}") if not 0.0 <= eps: raise ValueError(f"Invalid epsilon value: {eps}") if not 0.0 <= betas[0] < 1.0: raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") if not 0.0 <= betas[1] < 1.0: raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") if not 0.0 <= hessian_power <= 1.0: raise ValueError(f"Invalid Hessian power value: {hessian_power}") self.n_samples = n_samples self.update_each = update_each self.avg_conv_kernel = avg_conv_kernel # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training self.seed = 2147483647 self.generator = torch.Generator().manual_seed(self.seed) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power) super(Adahessian, self).__init__(params, defaults) for p in self.get_params(): p.hess = 0.0 self.state[p]["hessian step"] = 0 @property def is_second_order(self): return True def get_params(self): """ Gets all parameters in all param_groups with gradients """ return (p for group in self.param_groups for p in group['params'] if p.requires_grad) def zero_hessian(self): """ Zeros out the accumalated hessian traces. """ for p in self.get_params(): if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0: p.hess.zero_() @torch.no_grad() def set_hessian(self): """ Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter. 
""" params = [] for p in filter(lambda p: p.grad is not None, self.get_params()): if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step params.append(p) self.state[p]["hessian step"] += 1 if len(params) == 0: return if self.generator.device != params[0].device: # hackish way of casting the generator to the right device self.generator = torch.Generator(params[0].device).manual_seed(self.seed) grads = [p.grad for p in params] for i in range(self.n_samples): # Rademacher distribution {-1.0, 1.0} zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params] h_zs = torch.autograd.grad( grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1) for h_z, z, p in zip(h_zs, zs, params): p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z) @torch.no_grad() def step(self, closure=None): """ Performs a single optimization step. Arguments: closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None) """ loss = None if closure is not None: loss = closure() self.zero_hessian() self.set_hessian() for group in self.param_groups: for p in group['params']: if p.grad is None or p.hess is None: continue if self.avg_conv_kernel and p.dim() == 4: p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone() # Perform correct stepweight decay as in AdamW p.mul_(1 - group['lr'] * group['weight_decay']) state = self.state[p] # State initialization if len(state) == 1: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of Hessian diagonal square values state['exp_hessian_diag_sq'] = torch.zeros_like(p) exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1) exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2) bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] k = group['hessian_power'] denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps']) # make update step_size = group['lr'] / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size) return loss
0
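Sketch for the Adahessian class above. Because step() re-differentiates the gradients for the Hutchinson trace estimate in set_hessian(), the backward pass must keep its graph; model and objective here are assumptions.

import torch
import torch.nn as nn
from timm.optim import Adahessian

model = nn.Linear(16, 4)    # toy model (assumption)
optimizer = Adahessian(model.parameters(), lr=0.1, n_samples=1)

loss = model(torch.randn(8, 16)).pow(2).mean()   # dummy objective (assumption)
loss.backward(create_graph=True)   # keep the graph: step() differentiates the gradients again
optimizer.step()
optimizer.zero_grad()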
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/adamp.py
""" AdamP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/adamp.py Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 Code: https://github.com/clovaai/AdamP Copyright (c) 2020-present NAVER Corp. MIT license """ import torch import torch.nn.functional as F from torch.optim.optimizer import Optimizer import math def _channel_view(x) -> torch.Tensor: return x.reshape(x.size(0), -1) def _layer_view(x) -> torch.Tensor: return x.reshape(1, -1) def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float): wd = 1. expand_size = (-1,) + (1,) * (len(p.shape) - 1) for view_func in [_channel_view, _layer_view]: param_view = view_func(p) grad_view = view_func(grad) cosine_sim = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_() # FIXME this is a problem for PyTorch XLA if cosine_sim.max() < delta / math.sqrt(param_view.size(1)): p_n = p / param_view.norm(p=2, dim=1).add_(eps).reshape(expand_size) perturb -= p_n * view_func(p_n * perturb).sum(dim=1).reshape(expand_size) wd = wd_ratio return perturb, wd return perturb, wd class AdamP(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, delta=0.1, wd_ratio=0.1, nesterov=False): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, delta=delta, wd_ratio=wd_ratio, nesterov=nesterov) super(AdamP, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad beta1, beta2 = group['betas'] nesterov = group['nesterov'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) # Adam exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] state['step'] += 1 bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) step_size = group['lr'] / bias_correction1 if nesterov: perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom else: perturb = exp_avg / denom # Projection wd_ratio = 1. if len(p.shape) > 1: perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps']) # Weight decay if group['weight_decay'] > 0: p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio) # Step p.add_(perturb, alpha=-step_size) return loss
0
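Constructor sketch for the AdamP class above; delta and wd_ratio control the projection that damps weight-norm growth. Model and values are illustrative assumptions.

import torch.nn as nn
from timm.optim import AdamP

model = nn.Linear(16, 4)    # toy model (assumption)
optimizer = AdamP(model.parameters(), lr=1e-3, weight_decay=1e-2, delta=0.1, wd_ratio=0.1)
# training then follows the usual loss.backward(); optimizer.step(); optimizer.zero_grad() pattern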
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/adamw.py
""" AdamW Optimizer Impl copied from PyTorch master NOTE: Builtin optim.AdamW is used by the factory, this impl only serves as a Python based reference, will be removed someday """ import math import torch from torch.optim.optimizer import Optimizer class AdamW(Optimizer): r"""Implements AdamW algorithm. The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_. The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay coefficient (default: 1e-2) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad) super(AdamW, self).__init__(params, defaults) def __setstate__(self, state): super(AdamW, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue # Perform stepweight decay p.data.mul_(1 - group['lr'] * group['weight_decay']) # Perform optimization step grad = p.grad if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. 
values state['max_exp_avg_sq'] = torch.zeros_like(p) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. of gradient denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) else: denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) step_size = group['lr'] / bias_correction1 p.addcdiv_(exp_avg, denom, value=-step_size) return loss
0
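Constructor sketch for the reference AdamW class above; per its header note, the optimizer factory normally uses torch.optim.AdamW, but this class can be instantiated directly. Model is an assumption.

import torch.nn as nn
from timm.optim import AdamW   # reference implementation above; the factory prefers torch.optim.AdamW

model = nn.Linear(16, 4)    # toy model (assumption)
optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2, amsgrad=False)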
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/adan.py
""" Adan Optimizer Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022. https://arxiv.org/abs/2208.06677 Implementation adapted from https://github.com/sail-sg/Adan """ import math import torch from torch.optim import Optimizer class Adan(Optimizer): """ Implements a pytorch variant of Adan Adan was proposed in Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models[J]. arXiv preprint arXiv:2208.06677, 2022. https://arxiv.org/abs/2208.06677 Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float, flot], optional): coefficients used for computing running averages of gradient and its norm. (default: (0.98, 0.92, 0.99)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): decoupled weight decay (L2 penalty) (default: 0) no_prox (bool): how to perform the decoupled weight decay (default: False) """ def __init__( self, params, lr=1e-3, betas=(0.98, 0.92, 0.99), eps=1e-8, weight_decay=0.0, no_prox=False, ): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= betas[2] < 1.0: raise ValueError("Invalid beta parameter at index 2: {}".format(betas[2])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, no_prox=no_prox) super(Adan, self).__init__(params, defaults) @torch.no_grad() def restart_opt(self): for group in self.param_groups: group['step'] = 0 for p in group['params']: if p.requires_grad: state = self.state[p] # State initialization # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p) # Exponential moving average of gradient difference state['exp_avg_diff'] = torch.zeros_like(p) @torch.no_grad() def step(self, closure=None): """ Performs a single optimization step. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: beta1, beta2, beta3 = group['betas'] # assume same step across group now to simplify things # per parameter step can be easily support by making it tensor, or pass list into kernel if 'step' in group: group['step'] += 1 else: group['step'] = 1 bias_correction1 = 1.0 - beta1 ** group['step'] bias_correction2 = 1.0 - beta2 ** group['step'] bias_correction3 = 1.0 - beta3 ** group['step'] for p in group['params']: if p.grad is None: continue grad = p.grad state = self.state[p] if len(state) == 0: state['exp_avg'] = torch.zeros_like(p) state['exp_avg_diff'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) state['pre_grad'] = grad.clone() exp_avg, exp_avg_sq, exp_avg_diff = state['exp_avg'], state['exp_avg_diff'], state['exp_avg_sq'] grad_diff = grad - state['pre_grad'] exp_avg.lerp_(grad, 1. - beta1) # m_t exp_avg_diff.lerp_(grad_diff, 1. - beta2) # diff_t (v) update = grad + beta2 * grad_diff exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1. 
- beta3) # n_t denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction3)).add_(group['eps']) update = (exp_avg / bias_correction1 + beta2 * exp_avg_diff / bias_correction2).div_(denom) if group['no_prox']: p.data.mul_(1 - group['lr'] * group['weight_decay']) p.add_(update, alpha=-group['lr']) else: p.add_(update, alpha=-group['lr']) p.data.div_(1 + group['lr'] * group['weight_decay']) state['pre_grad'].copy_(grad) return loss
0
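Constructor sketch for the Adan class above, which uses three betas to track the first moment, the gradient-difference moment, and the second moment of the combined update; model and values are assumptions.

import torch.nn as nn
from timm.optim import Adan

model = nn.Linear(16, 4)    # toy model (assumption)
optimizer = Adan(model.parameters(), lr=1e-3, betas=(0.98, 0.92, 0.99), weight_decay=0.02)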
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/lamb.py
""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb This optimizer code was adapted from the following (starting with latest) * https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py * https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py * https://github.com/cybertronai/pytorch-lamb Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX. In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU. Original copyrights for above sources are below. Modifications Copyright 2021 Ross Wightman """ # Copyright (c) 2021, Habana Labs Ltd. All rights reserved. # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # MIT License # # Copyright (c) 2019 cybertronai # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import math import torch from torch.optim import Optimizer class Lamb(Optimizer): """Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its norm. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. 
(default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) grad_averaging (bool, optional): whether apply (1-beta2) to grad when calculating running averages of gradient. (default: True) max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0) trust_clip (bool): enable LAMBC trust ratio clipping (default: False) always_adapt (boolean, optional): Apply adaptive learning rate to 0.0 weight decay parameter (default: False) .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False): defaults = dict( lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, grad_averaging=grad_averaging, max_grad_norm=max_grad_norm, trust_clip=trust_clip, always_adapt=always_adapt) super().__init__(params, defaults) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly global_grad_norm = torch.zeros(1, device=device) for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') global_grad_norm.add_(grad.pow(2).sum()) global_grad_norm = torch.sqrt(global_grad_norm) # FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes # scalar types properly https://github.com/pytorch/pytorch/issues/9190 max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device) clip_global_grad_norm = torch.where( global_grad_norm > max_grad_norm, global_grad_norm / max_grad_norm, one_tensor) for group in self.param_groups: bias_correction = 1 if group['bias_correction'] else 0 beta1, beta2 = group['betas'] grad_averaging = 1 if group['grad_averaging'] else 0 beta3 = 1 - beta1 if grad_averaging else 1.0 # assume same step across group now to simplify things # per parameter step can be easily support by making it tensor, or pass list into kernel if 'step' in group: group['step'] += 1 else: group['step'] = 1 if bias_correction: bias_correction1 = 1 - beta1 ** group['step'] bias_correction2 = 1 - beta2 ** group['step'] else: bias_correction1, bias_correction2 = 1.0, 1.0 for p in group['params']: if p.grad is None: continue grad = p.grad.div_(clip_global_grad_norm) state = self.state[p] # State initialization if len(state) == 0: # Exponential moving average of gradient valuesa state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) update = (exp_avg / bias_correction1).div_(denom) 
weight_decay = group['weight_decay'] if weight_decay != 0: update.add_(p, alpha=weight_decay) if weight_decay != 0 or group['always_adapt']: # Layer-wise LR adaptation. By default, skip adaptation on parameters that are # excluded from weight decay, unless always_adapt == True, then always enabled. w_norm = p.norm(2.0) g_norm = update.norm(2.0) # FIXME nested where required since logical and/or not working in PT XLA trust_ratio = torch.where( w_norm > 0, torch.where(g_norm > 0, w_norm / g_norm, one_tensor), one_tensor, ) if group['trust_clip']: # LAMBC trust clipping, upper bound fixed at one trust_ratio = torch.minimum(trust_ratio, one_tensor) update.mul_(trust_ratio) p.add_(update, alpha=-group['lr']) return loss
0
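Constructor sketch for the Lamb class above; max_grad_norm and trust_clip are its distinctive knobs, and step() applies a layer-wise trust ratio before each update. Model and values are assumptions.

import torch.nn as nn
from timm.optim import Lamb

model = nn.Linear(16, 4)    # toy model (assumption)
optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01, max_grad_norm=1.0, trust_clip=False)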
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/lars.py
""" PyTorch LARS / LARC Optimizer An implementation of LARS (SGD) + LARC in PyTorch Based on: * PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 * NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py Additional cleanup and modifications to properly support PyTorch XLA. Copyright 2021 Ross Wightman """ import torch from torch.optim.optimizer import Optimizer class Lars(Optimizer): """ LARS for PyTorch Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate (default: 1.0). momentum (float, optional): momentum factor (default: 0) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) dampening (float, optional): dampening for momentum (default: 0) nesterov (bool, optional): enables Nesterov momentum (default: False) trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001) eps (float): eps for division denominator (default: 1e-8) trust_clip (bool): enable LARC trust ratio clipping (default: False) always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False) """ def __init__( self, params, lr=1.0, momentum=0, dampening=0, weight_decay=0, nesterov=False, trust_coeff=0.001, eps=1e-8, trust_clip=False, always_adapt=False, ): if lr < 0.0: raise ValueError(f"Invalid learning rate: {lr}") if momentum < 0.0: raise ValueError(f"Invalid momentum value: {momentum}") if weight_decay < 0.0: raise ValueError(f"Invalid weight_decay value: {weight_decay}") if nesterov and (momentum <= 0 or dampening != 0): raise ValueError("Nesterov momentum requires a momentum and zero dampening") defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, trust_coeff=trust_coeff, eps=eps, trust_clip=trust_clip, always_adapt=always_adapt, ) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] trust_coeff = group['trust_coeff'] eps = group['eps'] for p in group['params']: if p.grad is None: continue grad = p.grad # apply LARS LR adaptation, LARC clipping, weight decay # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py if weight_decay != 0 or group['always_adapt']: w_norm = p.norm(2.0) g_norm = grad.norm(2.0) trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) # FIXME nested where required since logical and/or not working in PT XLA trust_ratio = torch.where( w_norm > 0, torch.where(g_norm > 0, trust_ratio, one_tensor), one_tensor, ) if group['trust_clip']: trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) grad.add_(p, alpha=weight_decay) grad.mul_(trust_ratio) # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone(grad).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(grad, alpha=1. - dampening) if nesterov: grad = grad.add(buf, alpha=momentum) else: grad = buf p.add_(grad, alpha=-group['lr']) return loss
0
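Constructor sketch for the Lars class above; trust_coeff scales the layer-wise adaptive lr and trust_clip enables LARC-style clipping. Model and values are assumptions.

import torch.nn as nn
from timm.optim import Lars

model = nn.Linear(16, 4)    # toy model (assumption)
optimizer = Lars(model.parameters(), lr=0.5, momentum=0.9, weight_decay=1e-4, trust_coeff=0.001)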
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/lion.py
""" Lion Optimizer Paper: `Symbolic Discovery of Optimization Algorithms` - https://arxiv.org/abs/2302.06675 Original Impl: https://github.com/google/automl/tree/master/lion """ # Copyright 2023 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from typing import List import torch from torch.optim.optimizer import Optimizer class Lion(Optimizer): r"""Implements Lion algorithm.""" def __init__( self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0, maximize=False, foreach=None, ): """Initialize the hyperparameters. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-4) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.99)) weight_decay (float, optional): weight decay coefficient (default: 0) """ if not 0.0 <= lr: raise ValueError('Invalid learning rate: {}'.format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1])) defaults = dict( lr=lr, betas=betas, weight_decay=weight_decay, foreach=foreach, maximize=maximize, ) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('maximize', False) group.setdefault('foreach', None) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. Returns: the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('Lion does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) lion( params_with_grad, grads, exp_avgs, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], maximize=group['maximize'], foreach=group['foreach'], ) return loss def lion( params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim maximize: bool = False, foreach: bool = None, *, beta1: float, beta2: float, lr: float, weight_decay: float, ): r"""Functional API that performs Lion algorithm computation. 
""" if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_lion else: func = _single_tensor_lion func( params, grads, exp_avgs, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, maximize=maximize, ) def _single_tensor_lion( params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, maximize: bool, ): for i, param in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] if torch.is_complex(param): grad = torch.view_as_real(grad) exp_avg = torch.view_as_real(exp_avg) param = torch.view_as_real(param) # Perform stepweight decay param.mul_(1 - lr * weight_decay) # Weight update update = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) param.add_(torch.sign(update), alpha=-lr) # Decay the momentum running average coefficient exp_avg.lerp_(grad, 1 - beta2) def _multi_tensor_lion( params: List[torch.Tensor], grads: List[torch.Tensor], exp_avgs: List[torch.Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, maximize: bool, ): if len(params) == 0: return if maximize: grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] # Perform stepweight decay torch._foreach_mul_(params, 1 - lr * weight_decay) # Weight update updates = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(updates, grads, alpha=1 - beta1) updates = [u.sign() for u in updates] torch._foreach_add_(params, updates, alpha=-lr) # Decay the momentum running average coefficient torch._foreach_mul_(exp_avgs, beta2) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta2)
0
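Constructor sketch for the Lion class above; its sign-based update is reflected in the smaller default lr. Model and hyper-parameter values are illustrative assumptions.

import torch.nn as nn
from timm.optim import Lion

model = nn.Linear(16, 4)    # toy model (assumption)
optimizer = Lion(model.parameters(), lr=1e-4, betas=(0.9, 0.99), weight_decay=0.1)  # illustrative values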
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/lookahead.py
""" Lookahead Optimizer Wrapper. Implementation modified from: https://github.com/alphadl/lookahead.pytorch Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610 Hacked together by / Copyright 2020 Ross Wightman """ from collections import OrderedDict from typing import Callable, Dict import torch from torch.optim.optimizer import Optimizer from collections import defaultdict class Lookahead(Optimizer): def __init__(self, base_optimizer, alpha=0.5, k=6): # NOTE super().__init__() not called on purpose self._optimizer_step_pre_hooks: Dict[int, Callable] = OrderedDict() self._optimizer_step_post_hooks: Dict[int, Callable] = OrderedDict() if not 0.0 <= alpha <= 1.0: raise ValueError(f'Invalid slow update rate: {alpha}') if not 1 <= k: raise ValueError(f'Invalid lookahead steps: {k}') defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0) self._base_optimizer = base_optimizer self.param_groups = base_optimizer.param_groups self.defaults = base_optimizer.defaults self.defaults.update(defaults) self.state = defaultdict(dict) # manually add our defaults to the param groups for name, default in defaults.items(): for group in self._base_optimizer.param_groups: group.setdefault(name, default) @torch.no_grad() def update_slow(self, group): for fast_p in group["params"]: if fast_p.grad is None: continue param_state = self._base_optimizer.state[fast_p] if 'lookahead_slow_buff' not in param_state: param_state['lookahead_slow_buff'] = torch.empty_like(fast_p) param_state['lookahead_slow_buff'].copy_(fast_p) slow = param_state['lookahead_slow_buff'] slow.add_(fast_p - slow, alpha=group['lookahead_alpha']) fast_p.copy_(slow) def sync_lookahead(self): for group in self._base_optimizer.param_groups: self.update_slow(group) @torch.no_grad() def step(self, closure=None): loss = self._base_optimizer.step(closure) for group in self._base_optimizer.param_groups: group['lookahead_step'] += 1 if group['lookahead_step'] % group['lookahead_k'] == 0: self.update_slow(group) return loss def state_dict(self): return self._base_optimizer.state_dict() def load_state_dict(self, state_dict): self._base_optimizer.load_state_dict(state_dict) self.param_groups = self._base_optimizer.param_groups
0
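Usage sketch for the Lookahead wrapper above: it shares the base optimizer's param groups, updates the slow weights every k fast steps, and sync_lookahead() copies the slow weights back before evaluation. Model, loop length, and objective are assumptions.

import torch
import torch.nn as nn
from timm.optim import Lookahead

model = nn.Linear(16, 4)    # toy model (assumption)
base = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
optimizer = Lookahead(base, alpha=0.5, k=6)   # slow weights updated every k fast steps

for _ in range(12):         # illustrative loop length
    loss = model(torch.randn(8, 16)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
optimizer.sync_lookahead()  # copy slow weights into the model before eval / checkpointing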
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/madgrad.py
""" PyTorch MADGRAD optimizer MADGRAD: https://arxiv.org/abs/2101.11075 Code from: https://github.com/facebookresearch/madgrad """ # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import TYPE_CHECKING, Any, Callable, Optional import torch import torch.optim if TYPE_CHECKING: from torch.optim.optimizer import _params_t else: _params_t = Any class MADGRAD(torch.optim.Optimizer): """ MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic Optimization. .. _MADGRAD: https://arxiv.org/abs/2101.11075 MADGRAD is a general purpose optimizer that can be used in place of SGD or Adam may converge faster and generalize better. Currently GPU-only. Typically, the same learning rate schedule that is used for SGD or Adam may be used. The overall learning rate is not comparable to either method and should be determined by a hyper-parameter sweep. MADGRAD requires less weight decay than other methods, often as little as zero. Momentum values used for SGD or Adam's beta1 should work here also. On sparse problems both weight_decay and momentum should be set to 0. Arguments: params (iterable): Iterable of parameters to optimize or dicts defining parameter groups. lr (float): Learning rate (default: 1e-2). momentum (float): Momentum value in the range [0,1) (default: 0.9). weight_decay (float): Weight decay, i.e. a L2 penalty (default: 0). eps (float): Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6). """ def __init__( self, params: _params_t, lr: float = 1e-2, momentum: float = 0.9, weight_decay: float = 0, eps: float = 1e-6, decoupled_decay: bool = False, ): if momentum < 0 or momentum >= 1: raise ValueError(f"Momentum {momentum} must be in the range [0,1]") if lr <= 0: raise ValueError(f"Learning rate {lr} must be positive") if weight_decay < 0: raise ValueError(f"Weight decay {weight_decay} must be non-negative") if eps < 0: raise ValueError(f"Eps must be non-negative") defaults = dict( lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay) super().__init__(params, defaults) @property def supports_memory_efficient_fp16(self) -> bool: return False @property def supports_flat_params(self) -> bool: return True @torch.no_grad() def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: eps = group['eps'] lr = group['lr'] + eps weight_decay = group['weight_decay'] momentum = group['momentum'] ck = 1 - momentum for p in group["params"]: if p.grad is None: continue grad = p.grad if momentum != 0.0 and grad.is_sparse: raise RuntimeError("momentum != 0 is not compatible with sparse gradients") state = self.state[p] if len(state) == 0: state['step'] = 0 state['grad_sum_sq'] = torch.zeros_like(p) state['s'] = torch.zeros_like(p) if momentum != 0: state['x0'] = torch.clone(p).detach() state['step'] += 1 grad_sum_sq = state['grad_sum_sq'] s = state['s'] lamb = lr * math.sqrt(state['step']) # Apply weight decay if weight_decay != 0: if group['decoupled_decay']: p.mul_(1.0 - group['lr'] * weight_decay) else: if grad.is_sparse: raise RuntimeError("weight_decay option is not compatible with sparse gradients") grad.add_(p, alpha=weight_decay) if grad.is_sparse: grad = grad.coalesce() grad_val = grad._values() p_masked = p.sparse_mask(grad) grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) s_masked = s.sparse_mask(grad) # Compute x_0 from other known quantities rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) # Dense + sparse op grad_sq = grad * grad grad_sum_sq.add_(grad_sq, alpha=lamb) grad_sum_sq_masked.add_(grad_sq, alpha=lamb) rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) s.add_(grad, alpha=lamb) s_masked._values().add_(grad_val, alpha=lamb) # update masked copy of p p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) # Copy updated masked p to dense p using an add operation p_masked._values().add_(p_kp1_masked_vals, alpha=-1) p.add_(p_masked, alpha=-1) else: if momentum == 0: # Compute x_0 from other known quantities rms = grad_sum_sq.pow(1 / 3).add_(eps) x0 = p.addcdiv(s, rms, value=1) else: x0 = state['x0'] # Accumulate second moments grad_sum_sq.addcmul_(grad, grad, value=lamb) rms = grad_sum_sq.pow(1 / 3).add_(eps) # Update s s.add_(grad, alpha=lamb) # Step if momentum == 0: p.copy_(x0.addcdiv(s, rms, value=-1)) else: z = x0.addcdiv(s, rms, value=-1) # p is a moving average of z p.mul_(1 - ck).add_(z, alpha=ck) return loss
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/nadam.py
import math import torch from torch.optim.optimizer import Optimizer class Nadam(Optimizer): """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum). It has been proposed in `Incorporating Nesterov Momentum into Adam`__. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 2e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) schedule_decay (float, optional): momentum schedule decay (default: 4e-3) __ http://cs229.stanford.edu/proj2015/054_report.pdf __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf Originally taken from: https://github.com/pytorch/pytorch/pull/1408 NOTE: Has potential issues but does work well on some problems. """ def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, schedule_decay=4e-3): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay, ) super(Nadam, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 state['m_schedule'] = 1. state['exp_avg'] = torch.zeros_like(p) state['exp_avg_sq'] = torch.zeros_like(p) # Warming momentum schedule m_schedule = state['m_schedule'] schedule_decay = group['schedule_decay'] exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] eps = group['eps'] state['step'] += 1 t = state['step'] bias_correction2 = 1 - beta2 ** t if group['weight_decay'] != 0: grad = grad.add(p, alpha=group['weight_decay']) momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay))) momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay))) m_schedule_new = m_schedule * momentum_cache_t m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1 state['m_schedule'] = m_schedule_new # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2) denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new)) p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next)) return loss
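The warming momentum schedule is the distinguishing piece of this implementation: the effective momentum ramps from roughly half of beta1 at the first step toward beta1 as training progresses. A hedged helper below just re-evaluates the formula from step() for inspection; the step values chosen are arbitrary.

# Hedged sketch: evaluate the warming momentum schedule used in step() above.
# beta1 and schedule_decay follow the constructor defaults; steps are arbitrary.
def _nadam_momentum_schedule(beta1=0.9, schedule_decay=4e-3, steps=(1, 100, 1000, 10000)):
    # momentum_cache_t ramps from ~0.5 * beta1 at t=1 toward beta1 as t grows
    return {t: beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay))) for t in steps}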
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/nadamw.py
""" NAdamW Optimizer Based on simplified algorithm in https://github.com/mlcommons/algorithmic-efficiency/tree/main/baselines/nadamw Added multi-tensor (foreach) path. """ import math from typing import List, Optional import torch from torch import Tensor # Modified from github.com/pytorch/pytorch/blob/v1.12.1/torch/optim/adamw.py. class NAdamW(torch.optim.Optimizer): r"""Implements NAdamW algorithm. See Table 1 in https://arxiv.org/abs/1910.05446 for the implementation of the NAdam algorithm (there is also a comment in the code which highlights the only difference of NAdamW and AdamW). For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay coefficient (default: 1e-2) .. _Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, maximize: bool = False, foreach: Optional[bool] = None, capturable: bool = False, ): if not 0.0 <= lr: raise ValueError(f'Invalid learning rate: {lr}') if not 0.0 <= eps: raise ValueError(f'Invalid epsilon value: {eps}') if not 0.0 <= betas[0] < 1.0: raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}') if not 0.0 <= betas[1] < 1.0: raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}') if not 0.0 <= weight_decay: raise ValueError(f'Invalid weight_decay value: {weight_decay}') defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, foreach=foreach, maximize=maximize, capturable=capturable, ) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor( state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ self._cuda_graph_capture_health_check() loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] state_steps = [] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('NAdamW does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = torch.tensor(0.) 
# Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) state_steps.append(state['step']) nadamw( params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], maximize=group['maximize'], capturable=group['capturable'], ) return loss def nadamw( params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], foreach: Optional[bool] = None, capturable: bool = False, *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, ) -> None: r"""Functional API that performs NAdamW algorithm computation. See NAdamW class for details. """ if not all(isinstance(t, torch.Tensor) for t in state_steps): raise RuntimeError( 'API has changed, `state_steps` argument must contain a list of' + ' singleton tensors') if foreach is None: foreach = True if foreach and not torch.jit.is_scripting(): func = _multi_tensor_nadamw else: func = _single_tensor_nadamw func( params, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps, maximize=maximize, capturable=capturable, ) def _single_tensor_nadamw( params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool ): for i, param in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] exp_avg_sq = exp_avg_sqs[i] step_t = state_steps[i] # Update step. step_t += 1 # Perform stepweight decay. param.mul_(1. - lr * weight_decay) # Decay the first and second moment running average coefficient. exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if capturable: step = step_t # 1 - beta1 ** step can't be captured in a CUDA graph, even if step is a CUDA tensor # (incurs "RuntimeError: CUDA error: operation not permitted when stream is capturing") bias_correction1 = 1 - torch.pow(beta1, step) bias_correction2 = 1 - torch.pow(beta2, step) step_size = lr / bias_correction1 step_size_neg = step_size.neg() bias_correction2_sqrt = bias_correction2.sqrt() # Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) param.addcdiv_(exp_avg, denom) else: step = step_t.item() bias_correction1 = 1 - beta1 ** step bias_correction2 = 1 - beta2 ** step step_size = lr / bias_correction1 bias_correction2_sqrt = math.sqrt(bias_correction2) # Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. 
exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1) denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) param.addcdiv_(exp_avg, denom, value=-step_size) def _multi_tensor_nadamw( params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool, ): if len(params) == 0: return if capturable: assert all( p.is_cuda and step.is_cuda for p, step in zip(params, state_steps) ), "If capturable=True, params and state_steps must be CUDA tensors." if maximize: grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs] params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] # update steps torch._foreach_add_(state_steps, 1) # Perform stepweight decay torch._foreach_mul_(params, 1 - lr * weight_decay) # Decay the first and second moment running average coefficient torch._foreach_mul_(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) torch._foreach_mul_(exp_avg_sqs, beta2) torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2) if capturable: # TODO: use foreach_pow if/when foreach_pow is added bias_correction1 = [torch.pow(beta1, step) for step in state_steps] bias_correction2 = [torch.pow(beta2, step) for step in state_steps] # foreach_sub doesn't allow a scalar as the first arg torch._foreach_sub_(bias_correction1, 1) torch._foreach_sub_(bias_correction2, 1) torch._foreach_neg_(bias_correction1) torch._foreach_neg_(bias_correction2) # foreach_div doesn't allow a scalar as the first arg step_size = torch._foreach_div(bias_correction1, lr) torch._foreach_reciprocal_(step_size) torch._foreach_neg_(step_size) bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2) # Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. exp_avgs = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_( exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size) ) eps_over_step_size = torch._foreach_div(step_size, eps) torch._foreach_reciprocal_(eps_over_step_size) denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size) torch._foreach_addcdiv_(params, exp_avgs, denom) else: bias_correction1 = [1 - beta1 ** step.item() for step in state_steps] bias_correction2 = [1 - beta2 ** step.item() for step in state_steps] step_size = [(lr / bc) * -1 for bc in bias_correction1] bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2] # Only difference between NAdamW and AdamW in this implementation. # The official PyTorch implementation of NAdam uses a different algorithm. exp_avgs = torch._foreach_mul(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) denom = torch._foreach_add(exp_avg_sq_sqrt, eps) torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
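Since the foreach path is meant to be a fused re-implementation of the single-tensor path, a quick equivalence check is a useful smoke test. The sketch below is hedged: it assumes a recent PyTorch build exposing the private torch._foreach_* ops, and the toy model, data, and tolerance are illustrative assumptions.

# Hedged sanity-check sketch: the multi-tensor (foreach) path should match the
# single-tensor path to numerical tolerance on the same initial weights.
import copy

def _nadamw_paths_agree():
    torch.manual_seed(0)
    m1 = torch.nn.Linear(6, 3)  # hypothetical toy model
    m2 = copy.deepcopy(m1)
    o1 = NAdamW(m1.parameters(), lr=1e-3, foreach=False)
    o2 = NAdamW(m2.parameters(), lr=1e-3, foreach=True)
    x = torch.randn(4, 6)
    for m, o in ((m1, o1), (m2, o2)):
        o.zero_grad()
        m(x).pow(2).mean().backward()
        o.step()
    return all(torch.allclose(p1, p2, atol=1e-6) for p1, p2 in zip(m1.parameters(), m2.parameters()))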
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/nvnovograd.py
""" Nvidia NovoGrad Optimizer. Original impl by Nvidia from Jasper example: - https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechRecognition/Jasper Paper: `Stochastic Gradient Methods with Layer-wise Adaptive Moments for Training of Deep Networks` - https://arxiv.org/abs/1905.11286 """ import torch from torch.optim.optimizer import Optimizer import math class NvNovoGrad(Optimizer): """ Implements Novograd algorithm. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.95, 0.98)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) grad_averaging: gradient averaging amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) """ def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8, weight_decay=0, grad_averaging=False, amsgrad=False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, grad_averaging=grad_averaging, amsgrad=amsgrad) super(NvNovoGrad, self).__init__(params, defaults) def __setstate__(self, state): super(NvNovoGrad, self).__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError('Sparse gradients are not supported.') amsgrad = group['amsgrad'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 norm = torch.sum(torch.pow(grad, 2)) if exp_avg_sq == 0: exp_avg_sq.copy_(norm) else: exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. 
                    # of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                grad.div_(denom)
                if group['weight_decay'] != 0:
                    grad.add_(p, alpha=group['weight_decay'])
                if group['grad_averaging']:
                    grad.mul_(1 - beta1)

                exp_avg.mul_(beta1).add_(grad)

                p.add_(exp_avg, alpha=-group['lr'])

        return loss
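A distinctive detail of NovoGrad is that the second moment is layer-wise: state['exp_avg_sq'] is a 0-dim scalar per parameter tensor, seeded from the first gradient's squared norm, rather than a per-element tensor. A hedged usage sketch (toy model and hyper-parameters are assumptions):

# Hedged usage sketch; inspects the scalar (0-dim) second-moment state.
def _novograd_demo():
    torch.manual_seed(0)
    model = torch.nn.Linear(5, 5)  # hypothetical toy model
    opt = NvNovoGrad(model.parameters(), lr=1e-3, betas=(0.95, 0.98), grad_averaging=True)
    model(torch.randn(2, 5)).sum().backward()
    opt.step()
    return [opt.state[p]['exp_avg_sq'].ndim for p in model.parameters()]  # all entries are 0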
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/optim_factory.py
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2021 Ross Wightman """ import logging from itertools import islice from typing import Optional, Callable, Tuple import torch import torch.nn as nn import torch.optim as optim from timm.models import group_parameters from .adabelief import AdaBelief from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .adan import Adan from .lamb import Lamb from .lars import Lars from .lion import Lion from .lookahead import Lookahead from .madgrad import MADGRAD from .nadam import Nadam from .nadamw import NAdamW from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP _logger = logging.getLogger(__name__) # optimizers to default to multi-tensor _DEFAULT_FOREACH = { 'lion', } def param_groups_weight_decay( model: nn.Module, weight_decay=1e-5, no_weight_decay_list=() ): no_weight_decay_list = set(no_weight_decay_list) decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue if param.ndim <= 1 or name.endswith(".bias") or name in no_weight_decay_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def _group(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def _layer_map(model, layers_per_group=12, num_groups=None): def _in_head(n, hp): if not hp: return True elif isinstance(hp, (tuple, list)): return any([n.startswith(hpi) for hpi in hp]) else: return n.startswith(hp) head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None) names_trunk = [] names_head = [] for n, _ in model.named_parameters(): names_head.append(n) if _in_head(n, head_prefix) else names_trunk.append(n) # group non-head layers num_trunk_layers = len(names_trunk) if num_groups is not None: layers_per_group = -(num_trunk_layers // -num_groups) names_trunk = list(_group(names_trunk, layers_per_group)) num_trunk_groups = len(names_trunk) layer_map = {n: i for i, l in enumerate(names_trunk) for n in l} layer_map.update({n: num_trunk_groups for n in names_head}) return layer_map def param_groups_layer_decay( model: nn.Module, weight_decay: float = 0.05, no_weight_decay_list: Tuple[str] = (), layer_decay: float = .75, end_layer_decay: Optional[float] = None, verbose: bool = False, ): """ Parameter groups for layer-wise lr decay & weight decay Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58 """ no_weight_decay_list = set(no_weight_decay_list) param_group_names = {} # NOTE for debugging param_groups = {} if hasattr(model, 'group_matcher'): # FIXME interface needs more work layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True) else: # fallback layer_map = _layer_map(model) num_layers = max(layer_map.values()) + 1 layer_max = num_layers - 1 layer_scales = list(layer_decay ** (layer_max - i) for i in range(num_layers)) for name, param in model.named_parameters(): if not param.requires_grad: continue # no decay: all 1D parameters and model specific ones if param.ndim == 1 or name in no_weight_decay_list: g_decay = "no_decay" this_decay = 0. 
else: g_decay = "decay" this_decay = weight_decay layer_id = layer_map.get(name, layer_max) group_name = "layer_%d_%s" % (layer_id, g_decay) if group_name not in param_groups: this_scale = layer_scales[layer_id] param_group_names[group_name] = { "lr_scale": this_scale, "weight_decay": this_decay, "param_names": [], } param_groups[group_name] = { "lr_scale": this_scale, "weight_decay": this_decay, "params": [], } param_group_names[group_name]["param_names"].append(name) param_groups[group_name]["params"].append(param) if verbose: import json _logger.info("parameter groups: \n%s" % json.dumps(param_group_names, indent=2)) return list(param_groups.values()) def optimizer_kwargs(cfg): """ cfg/argparse to kwargs helper Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn. """ kwargs = dict( opt=cfg.opt, lr=cfg.lr, weight_decay=cfg.weight_decay, momentum=cfg.momentum, ) if getattr(cfg, 'opt_eps', None) is not None: kwargs['eps'] = cfg.opt_eps if getattr(cfg, 'opt_betas', None) is not None: kwargs['betas'] = cfg.opt_betas if getattr(cfg, 'layer_decay', None) is not None: kwargs['layer_decay'] = cfg.layer_decay if getattr(cfg, 'opt_args', None) is not None: kwargs.update(cfg.opt_args) if getattr(cfg, 'opt_foreach', None) is not None: kwargs['foreach'] = cfg.opt_foreach return kwargs def create_optimizer(args, model, filter_bias_and_bn=True): """ Legacy optimizer factory for backwards compatibility. NOTE: Use create_optimizer_v2 for new code. """ return create_optimizer_v2( model, **optimizer_kwargs(cfg=args), filter_bias_and_bn=filter_bias_and_bn, ) def create_optimizer_v2( model_or_params, opt: str = 'sgd', lr: Optional[float] = None, weight_decay: float = 0., momentum: float = 0.9, foreach: Optional[bool] = None, filter_bias_and_bn: bool = True, layer_decay: Optional[float] = None, param_group_fn: Optional[Callable] = None, **kwargs, ): """ Create an optimizer. TODO currently the model is passed in and all parameters are selected for optimization. For more general use an interface that allows selection of parameters to optimize and lr groups, one of: * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion * expose the parameters interface and leave it up to caller Args: model_or_params (nn.Module): model containing parameters to optimize opt: name of optimizer to create lr: initial learning rate weight_decay: weight decay to apply in optimizer momentum: momentum for momentum based optimizers (others may use betas via kwargs) foreach: Enable / disable foreach (multi-tensor) operation if True / False. Choose safe default if None filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay **kwargs: extra optimizer specific kwargs to pass through Returns: Optimizer """ if isinstance(model_or_params, nn.Module): # a model was passed in, extract parameters and add weight decays to appropriate layers no_weight_decay = {} if hasattr(model_or_params, 'no_weight_decay'): no_weight_decay = model_or_params.no_weight_decay() if param_group_fn: parameters = param_group_fn(model_or_params) elif layer_decay is not None: parameters = param_groups_layer_decay( model_or_params, weight_decay=weight_decay, layer_decay=layer_decay, no_weight_decay_list=no_weight_decay, ) weight_decay = 0. elif weight_decay and filter_bias_and_bn: parameters = param_groups_weight_decay(model_or_params, weight_decay, no_weight_decay) weight_decay = 0. 
        else:
            parameters = model_or_params.parameters()
    else:
        # iterable of parameters or param groups passed in
        parameters = model_or_params

    opt_lower = opt.lower()
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]

    if opt_lower.startswith('fused'):
        try:
            from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
            has_apex = True
        except ImportError:
            has_apex = False
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'

    if opt_lower.startswith('bnb'):
        try:
            import bitsandbytes as bnb
            has_bnb = True
        except ImportError:
            has_bnb = False
        assert has_bnb and torch.cuda.is_available(), 'bitsandbytes and CUDA required for bnb optimizers'

    opt_args = dict(weight_decay=weight_decay, **kwargs)
    if lr is not None:
        opt_args.setdefault('lr', lr)

    if foreach is None:
        if opt in _DEFAULT_FOREACH:
            opt_args.setdefault('foreach', True)
    else:
        opt_args['foreach'] = foreach

    # basic SGD & related
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)

    # adaptive
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'nadam':
        try:
            # NOTE PyTorch >= 1.10 should have native NAdam
            optimizer = optim.NAdam(parameters, **opt_args)
        except AttributeError:
            optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'nadamw':
        optimizer = NAdamW(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamax':
        optimizer = optim.Adamax(parameters, **opt_args)
    elif opt_lower == 'adabelief':
        optimizer = AdaBelief(parameters, rectify=False, **opt_args)
    elif opt_lower == 'radabelief':
        optimizer = AdaBelief(parameters, rectify=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adagrad':
        opt_args.setdefault('eps', 1e-8)
        optimizer = optim.Adagrad(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'adanp':
        optimizer = Adan(parameters, no_prox=False, **opt_args)
    elif opt_lower == 'adanw':
        optimizer = Adan(parameters, no_prox=True, **opt_args)
    elif opt_lower == 'lamb':
        optimizer = Lamb(parameters, **opt_args)
    elif opt_lower == 'lambc':
        optimizer = Lamb(parameters, trust_clip=True, **opt_args)
    elif opt_lower == 'larc':
        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args)
    elif opt_lower == 'lars':
        optimizer = Lars(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'nlarc':
        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args)
    elif opt_lower == 'nlars':
        optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'madgrad':
        optimizer = MADGRAD(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'madgradw':
        optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args)
    elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == 'lion':
        opt_args.pop('eps', None)
        optimizer = Lion(parameters, **opt_args)

    # second order
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)

    # NVIDIA fused optimizers, require APEX to be installed
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)

    # bitsandbytes optimizers, require bitsandbytes to be installed
    elif opt_lower == 'bnbsgd':
        opt_args.pop('eps', None)
        optimizer = bnb.optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'bnbsgd8bit':
        opt_args.pop('eps', None)
        optimizer = bnb.optim.SGD8bit(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'bnbmomentum':
        opt_args.pop('eps', None)
        optimizer = bnb.optim.SGD(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'bnbmomentum8bit':
        opt_args.pop('eps', None)
        optimizer = bnb.optim.SGD8bit(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'bnbadam':
        optimizer = bnb.optim.Adam(parameters, **opt_args)
    elif opt_lower == 'bnbadam8bit':
        optimizer = bnb.optim.Adam8bit(parameters, **opt_args)
    elif opt_lower == 'bnbadamw':
        optimizer = bnb.optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'bnbadamw8bit':
        optimizer = bnb.optim.AdamW8bit(parameters, **opt_args)
    elif opt_lower == 'bnblamb':
        optimizer = bnb.optim.LAMB(parameters, **opt_args)
    elif opt_lower == 'bnblamb8bit':
        optimizer = bnb.optim.LAMB8bit(parameters, **opt_args)
    elif opt_lower == 'bnblars':
        optimizer = bnb.optim.LARS(parameters, **opt_args)
    elif opt_lower == 'bnblars8bit':
        optimizer = bnb.optim.LARS8bit(parameters, **opt_args)
    elif opt_lower == 'bnblion':
        optimizer = bnb.optim.Lion(parameters, **opt_args)
    elif opt_lower == 'bnblion8bit':
        optimizer = bnb.optim.Lion8bit(parameters, **opt_args)

    else:
        raise ValueError(f'Invalid optimizer: {opt}')

    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)

    return optimizer
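The factory's most commonly used behaviour is the bias/norm filtering: with a weight decay set and filter_bias_and_bn left True, param_groups_weight_decay() above places all 1d parameters (biases, norm weights) in a zero weight-decay group. A hedged sketch (the toy model is an assumption):

# Hedged usage sketch of create_optimizer_v2() defined above.
def _factory_demo():
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))  # hypothetical toy model
    opt = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
    # group 0 holds the 1d params with weight_decay 0., group 1 the rest with 0.05;
    # a 'lookahead_' prefix wraps the result, e.g. opt='lookahead_adamw'
    return [g['weight_decay'] for g in opt.param_groups]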
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/radam.py
"""RAdam Optimizer. Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265 """ import math import torch from torch.optim.optimizer import Optimizer class RAdam(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)]) super(RAdam, self).__init__(params, defaults) def __setstate__(self, state): super(RAdam, self).__setstate__(state) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.float() if grad.is_sparse: raise RuntimeError('RAdam does not support sparse gradients') p_fp32 = p.float() state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_fp32) state['exp_avg_sq'] = torch.zeros_like(p_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) state['step'] += 1 buffered = group['buffer'][int(state['step'] % 10)] if state['step'] == buffered[0]: num_sma, step_size = buffered[1], buffered[2] else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] num_sma_max = 2 / (1 - beta2) - 1 num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = num_sma # more conservative since it's an approximated value if num_sma >= 5: step_size = group['lr'] * math.sqrt( (1 - beta2_t) * (num_sma - 4) / (num_sma_max - 4) * (num_sma - 2) / num_sma * num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) else: step_size = group['lr'] / (1 - beta1 ** state['step']) buffered[2] = step_size if group['weight_decay'] != 0: p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr']) # more conservative since it's an approximated value if num_sma >= 5: denom = exp_avg_sq.sqrt().add_(group['eps']) p_fp32.addcdiv_(exp_avg, denom, value=-step_size) else: p_fp32.add_(exp_avg, alpha=-step_size) p.copy_(p_fp32) return loss
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/rmsprop_tf.py
""" RMSProp modified to behave like Tensorflow impl Originally cut & paste from PyTorch RMSProp https://github.com/pytorch/pytorch/blob/063946d2b3f3f1e953a2a3b54e0b34f1393de295/torch/optim/rmsprop.py Licensed under BSD-Clause 3 (ish), https://github.com/pytorch/pytorch/blob/master/LICENSE Modifications Copyright 2021 Ross Wightman """ import torch from torch.optim import Optimizer class RMSpropTF(Optimizer): """Implements RMSprop algorithm (TensorFlow style epsilon) NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt and a few other modifications to closer match Tensorflow for matching hyper-params. Noteworthy changes include: 1. Epsilon applied inside square-root 2. square_avg initialized to ones 3. LR scaling of update accumulated in momentum buffer Proposed by G. Hinton in his `course <http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_. The centered version first appears in `Generating Sequences With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-2) momentum (float, optional): momentum factor (default: 0) alpha (float, optional): smoothing (decay) constant (default: 0.9) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-10) centered (bool, optional) : if ``True``, compute the centered RMSProp, the gradient is normalized by an estimation of its variance weight_decay (float, optional): weight decay (L2 penalty) (default: 0) decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101 lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer update as per defaults in Tensorflow """ def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False, decoupled_decay=False, lr_in_momentum=True): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= momentum: raise ValueError("Invalid momentum value: {}".format(momentum)) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) if not 0.0 <= alpha: raise ValueError("Invalid alpha value: {}".format(alpha)) defaults = dict( lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum) super(RMSpropTF, self).__init__(params, defaults) def __setstate__(self, state): super(RMSpropTF, self).__setstate__(state) for group in self.param_groups: group.setdefault('momentum', 0) group.setdefault('centered', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad if grad.is_sparse: raise RuntimeError('RMSprop does not support sparse gradients') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 state['square_avg'] = torch.ones_like(p) # PyTorch inits to zero if group['momentum'] > 0: state['momentum_buffer'] = torch.zeros_like(p) if group['centered']: state['grad_avg'] = torch.zeros_like(p) square_avg = state['square_avg'] one_minus_alpha = 1. - group['alpha'] state['step'] += 1 if group['weight_decay'] != 0: if group['decoupled_decay']: p.mul_(1. - group['lr'] * group['weight_decay']) else: grad = grad.add(p, alpha=group['weight_decay']) # Tensorflow order of ops for updating squared avg square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha) # square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) # PyTorch original if group['centered']: grad_avg = state['grad_avg'] grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha) avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_() # eps in sqrt # grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) # PyTorch original else: avg = square_avg.add(group['eps']).sqrt_() # eps moved in sqrt if group['momentum'] > 0: buf = state['momentum_buffer'] # Tensorflow accumulates the LR scaling in the momentum buffer if group['lr_in_momentum']: buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr']) p.add_(-buf) else: # PyTorch scales the param update by LR buf.mul_(group['momentum']).addcdiv_(grad, avg) p.add_(buf, alpha=-group['lr']) else: p.addcdiv_(grad, avg, value=-group['lr']) return loss
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/optim/sgdp.py
""" SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 Code: https://github.com/clovaai/AdamP Copyright (c) 2020-present NAVER Corp. MIT license """ import torch import torch.nn.functional as F from torch.optim.optimizer import Optimizer, required import math from .adamp import projection class SGDP(Optimizer): def __init__(self, params, lr=required, momentum=0, dampening=0, weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1): defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio) super(SGDP, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] for p in group['params']: if p.grad is None: continue grad = p.grad state = self.state[p] # State initialization if len(state) == 0: state['momentum'] = torch.zeros_like(p) # SGD buf = state['momentum'] buf.mul_(momentum).add_(grad, alpha=1. - dampening) if nesterov: d_p = grad + momentum * buf else: d_p = buf # Projection wd_ratio = 1. if len(p.shape) > 1: d_p, wd_ratio = projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps']) # Weight decay if weight_decay != 0: p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum)) # Step p.add_(d_p, alpha=-group['lr']) return loss
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/__init__.py
from .cosine_lr import CosineLRScheduler from .multistep_lr import MultiStepLRScheduler from .plateau_lr import PlateauLRScheduler from .poly_lr import PolyLRScheduler from .step_lr import StepLRScheduler from .tanh_lr import TanhLRScheduler from .scheduler_factory import create_scheduler, create_scheduler_v2, scheduler_kwargs
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/cosine_lr.py
""" Cosine Scheduler Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. Hacked together by / Copyright 2021 Ross Wightman """ import logging import math import numpy as np import torch from .scheduler import Scheduler _logger = logging.getLogger(__name__) class CosineLRScheduler(Scheduler): """ Cosine decay with restarts. This is described in the paper https://arxiv.org/abs/1608.03983. Inspiration from https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 """ def __init__( self, optimizer: torch.optim.Optimizer, t_initial: int, lr_min: float = 0., cycle_mul: float = 1., cycle_decay: float = 1., cycle_limit: int = 1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, k_decay=1.0, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) assert t_initial > 0 assert lr_min >= 0 if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: _logger.warning( "Cosine annealing scheduler will have no effect on the learning " "rate since t_initial = t_mul = eta_mul = 1.") self.t_initial = t_initial self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix self.k_decay = k_decay if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - (self.t_initial * i) gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] k = self.k_decay if i < self.cycle_limit: lrs = [ self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) for lr_max in lr_max_values ] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/multistep_lr.py
""" MultiStep LR Scheduler Basic multi step LR schedule with warmup, noise. """ import torch import bisect from timm.scheduler.scheduler import Scheduler from typing import List class MultiStepLRScheduler(Scheduler): """ """ def __init__( self, optimizer: torch.optim.Optimizer, decay_t: List[int], decay_rate: float = 1., warmup_t=0, warmup_lr_init=0, warmup_prefix=True, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.decay_t = decay_t self.decay_rate = decay_rate self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def get_curr_decay_steps(self, t): # find where in the array t goes, # assumes self.decay_t is sorted return bisect.bisect_right(self.decay_t, t + 1) def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values] return lrs
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/plateau_lr.py
""" Plateau Scheduler Adapts PyTorch plateau scheduler and allows application of noise, warmup. Hacked together by / Copyright 2020 Ross Wightman """ import torch from .scheduler import Scheduler class PlateauLRScheduler(Scheduler): """Decay the LR by a factor every time the validation loss plateaus.""" def __init__( self, optimizer, decay_rate=0.1, patience_t=10, verbose=True, threshold=1e-4, cooldown_t=0, warmup_t=0, warmup_lr_init=0, lr_min=0, mode='max', noise_range_t=None, noise_type='normal', noise_pct=0.67, noise_std=1.0, noise_seed=None, initialize=True, ): super().__init__( optimizer, 'lr', noise_range_t=noise_range_t, noise_type=noise_type, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer, patience=patience_t, factor=decay_rate, verbose=verbose, threshold=threshold, cooldown=cooldown_t, mode=mode, min_lr=lr_min ) self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] self.restore_lr = None def state_dict(self): return { 'best': self.lr_scheduler.best, 'last_epoch': self.lr_scheduler.last_epoch, } def load_state_dict(self, state_dict): self.lr_scheduler.best = state_dict['best'] if 'last_epoch' in state_dict: self.lr_scheduler.last_epoch = state_dict['last_epoch'] # override the base class step fn completely def step(self, epoch, metric=None): if epoch <= self.warmup_t: lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] super().update_groups(lrs) else: if self.restore_lr is not None: # restore actual LR from before our last noise perturbation before stepping base for i, param_group in enumerate(self.optimizer.param_groups): param_group['lr'] = self.restore_lr[i] self.restore_lr = None self.lr_scheduler.step(metric, epoch) # step the base scheduler if self._is_apply_noise(epoch): self._apply_noise(epoch) def step_update(self, num_updates: int, metric: float = None): return None def _apply_noise(self, epoch): noise = self._calculate_noise(epoch) # apply the noise on top of previous LR, cache the old value so we can restore for normal # stepping of base scheduler restore_lr = [] for i, param_group in enumerate(self.optimizer.param_groups): old_lr = float(param_group['lr']) restore_lr.append(old_lr) new_lr = old_lr + old_lr * noise param_group['lr'] = new_lr self.restore_lr = restore_lr def _get_lr(self, t: int) -> float: assert False, 'should not be called as step is overridden'
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/poly_lr.py
""" Polynomial Scheduler Polynomial LR schedule with warmup, noise. Hacked together by / Copyright 2021 Ross Wightman """ import math import logging import torch from .scheduler import Scheduler _logger = logging.getLogger(__name__) class PolyLRScheduler(Scheduler): """ Polynomial LR Scheduler w/ warmup, noise, and k-decay k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 """ def __init__( self, optimizer: torch.optim.Optimizer, t_initial: int, power: float = 0.5, lr_min: float = 0., cycle_mul: float = 1., cycle_decay: float = 1., cycle_limit: int = 1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, k_decay=1.0, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize ) assert t_initial > 0 assert lr_min >= 0 if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: _logger.warning("Cosine annealing scheduler will have no effect on the learning " "rate since t_initial = t_mul = eta_mul = 1.") self.t_initial = t_initial self.power = power self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix self.k_decay = k_decay if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - (self.t_initial * i) gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] k = self.k_decay if i < self.cycle_limit: lrs = [ self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power for lr_max in lr_max_values ] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/scheduler.py
import abc from abc import ABC from typing import Any, Dict, Optional import torch class Scheduler(ABC): """ Parameter Scheduler Base Class A scheduler base class that can be used to schedule any optimizer parameter groups. Unlike the builtin PyTorch schedulers, this is intended to be consistently called * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value * At the END of each optimizer update, after incrementing the update count, to calculate next update's value The schedulers built on this should try to remain as stateless as possible (for simplicity). This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' and -1 values for special behaviour. All epoch and update counts must be tracked in the training code and explicitly passed in to the schedulers on the corresponding step or step_update call. Based on ideas from: * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers """ def __init__( self, optimizer: torch.optim.Optimizer, param_group_field: str, t_in_epochs: bool = True, noise_range_t=None, noise_type='normal', noise_pct=0.67, noise_std=1.0, noise_seed=None, initialize: bool = True, ) -> None: self.optimizer = optimizer self.param_group_field = param_group_field self._initial_param_group_field = f"initial_{param_group_field}" if initialize: for i, group in enumerate(self.optimizer.param_groups): if param_group_field not in group: raise KeyError(f"{param_group_field} missing from param_groups[{i}]") group.setdefault(self._initial_param_group_field, group[param_group_field]) else: for i, group in enumerate(self.optimizer.param_groups): if self._initial_param_group_field not in group: raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] self.metric = None # any point to having this for all? 
self.t_in_epochs = t_in_epochs self.noise_range_t = noise_range_t self.noise_pct = noise_pct self.noise_type = noise_type self.noise_std = noise_std self.noise_seed = noise_seed if noise_seed is not None else 42 self.update_groups(self.base_values) def state_dict(self) -> Dict[str, Any]: return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self.__dict__.update(state_dict) @abc.abstractmethod def _get_lr(self, t: int) -> float: pass def _get_values(self, t: int, on_epoch: bool = True) -> Optional[float]: proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs) if not proceed: return None return self._get_lr(t) def step(self, epoch: int, metric: float = None) -> None: self.metric = metric values = self._get_values(epoch, on_epoch=True) if values is not None: values = self._add_noise(values, epoch) self.update_groups(values) def step_update(self, num_updates: int, metric: float = None): self.metric = metric values = self._get_values(num_updates, on_epoch=False) if values is not None: values = self._add_noise(values, num_updates) self.update_groups(values) def update_groups(self, values): if not isinstance(values, (list, tuple)): values = [values] * len(self.optimizer.param_groups) for param_group, value in zip(self.optimizer.param_groups, values): if 'lr_scale' in param_group: param_group[self.param_group_field] = value * param_group['lr_scale'] else: param_group[self.param_group_field] = value def _add_noise(self, lrs, t): if self._is_apply_noise(t): noise = self._calculate_noise(t) lrs = [v + v * noise for v in lrs] return lrs def _is_apply_noise(self, t) -> bool: """Return True if scheduler in noise range.""" apply_noise = False if self.noise_range_t is not None: if isinstance(self.noise_range_t, (list, tuple)): apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] else: apply_noise = t >= self.noise_range_t return apply_noise def _calculate_noise(self, t) -> float: g = torch.Generator() g.manual_seed(self.noise_seed + t) if self.noise_type == 'normal': while True: # resample if noise out of percent limit, brute force but shouldn't spin much noise = torch.randn(1, generator=g).item() if abs(noise) < self.noise_pct: return noise else: noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct return noise
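To make the contract concrete, here is a hedged sketch of a minimal concrete subclass: only _get_lr() must be implemented, and the base class handles param group updates, noise, and the epoch/update dispatch. LinearDecayScheduler is a hypothetical name for illustration, not part of timm.

# Hedged sketch: a minimal concrete Scheduler subclass (hypothetical, not in timm).
class LinearDecayScheduler(Scheduler):
    def __init__(self, optimizer, t_initial: int, lr_min: float = 0., **kwargs):
        super().__init__(optimizer, param_group_field='lr', **kwargs)
        self.t_initial = t_initial
        self.lr_min = lr_min

    def _get_lr(self, t):
        frac = min(t / self.t_initial, 1.)
        # interpolate each group's base value toward lr_min
        return [v + (self.lr_min - v) * frac for v in self.base_values]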
0