Dataset columns: repo_id (string, length 15 to 86), file_path (string, length 28 to 180), content (string, length 1 to 1.75M), __index_level_0__ (int64, always 0).
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/vision_transformer_relpos.py
""" Relative Position Vision Transformer (ViT) in PyTorch NOTE: these models are experimental / WIP, expect changes Hacked together by / Copyright 2022, Ross Wightman """ import logging import math from functools import partial from typing import Optional, Tuple import torch import torch.nn as nn from torch.jit import Final from torch.utils.checkpoint import checkpoint from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import PatchEmbed, Mlp, DropPath, RelPosMlp, RelPosBias, use_fused_attn from ._builder import build_model_with_cfg from ._registry import generate_default_cfgs, register_model __all__ = ['VisionTransformerRelPos'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class RelPosAttention(nn.Module): fused_attn: Final[bool] def __init__( self, dim, num_heads=8, qkv_bias=False, qk_norm=False, rel_pos_cls=None, attn_drop=0., proj_drop=0., norm_layer=nn.LayerNorm, ): super().__init__() assert dim % num_heads == 0, 'dim should be divisible by num_heads' self.num_heads = num_heads self.head_dim = dim // num_heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() self.rel_pos = rel_pos_cls(num_heads=num_heads) if rel_pos_cls else None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): B, N, C = x.shape qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = qkv.unbind(0) q = self.q_norm(q) k = self.k_norm(k) if self.fused_attn: if self.rel_pos is not None: attn_bias = self.rel_pos.get_bias() elif shared_rel_pos is not None: attn_bias = shared_rel_pos else: attn_bias = None x = torch.nn.functional.scaled_dot_product_attention( q, k, v, attn_mask=attn_bias, dropout_p=self.attn_drop.p, ) else: q = q * self.scale attn = q @ k.transpose(-2, -1) if self.rel_pos is not None: attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos) elif shared_rel_pos is not None: attn = attn + shared_rel_pos attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v x = x.transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class LayerScale(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): return x.mul_(self.gamma) if self.inplace else x * self.gamma class RelPosBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, rel_pos_cls=None, init_values=None, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = RelPosAttention( dim, num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=proj_drop, ) self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path1 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class ResPostRelPosBlock(nn.Module): def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_norm=False, rel_pos_cls=None, init_values=None, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.init_values = init_values self.attn = RelPosAttention( dim, num_heads, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, attn_drop=attn_drop, proj_drop=proj_drop, ) self.norm1 = norm_layer(dim) self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, ) self.norm2 = norm_layer(dim) self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.init_weights() def init_weights(self): # NOTE this init overrides that base model init with specific changes for the block type if self.init_values is not None: nn.init.constant_(self.norm1.weight, self.init_values) nn.init.constant_(self.norm2.weight, self.init_values) def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None): x = x + self.drop_path1(self.norm1(self.attn(x, shared_rel_pos=shared_rel_pos))) x = x + self.drop_path2(self.norm2(self.mlp(x))) return x class VisionTransformerRelPos(nn.Module): """ Vision Transformer w/ Relative Position Bias Differing from classic vit, this impl * uses relative position index (swin v1 / beit) or relative log coord + mlp (swin v2) pos embed * defaults to no class token (can be enabled) * defaults to global avg pool for head (can be changed) * layer-scale (residual branch gain) enabled """ def __init__( self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, global_pool='avg', embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=True, qk_norm=False, init_values=1e-6, class_token=False, fc_norm=False, rel_pos_type='mlp', rel_pos_dim=None, shared_rel_pos=False, drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0., weight_init='skip', embed_layer=PatchEmbed, norm_layer=None, act_layer=None, block_fn=RelPosBlock ): """ Args: img_size (int, tuple): input image size patch_size (int, tuple): patch size in_chans (int): number of input channels num_classes (int): number of classes for classification head global_pool (str): type of global pooling for final sequence (default: 'avg') embed_dim (int): embedding dimension depth (int): depth of transformer num_heads (int): number of attention heads mlp_ratio (int): ratio of mlp hidden dim to embedding dim qkv_bias (bool): enable bias for qkv if True qk_norm (bool): Enable normalization of query and key in attention init_values: (float): layer-scale init values class_token (bool): use class token (default: False) fc_norm (bool): use pre classifier norm instead of pre-pool rel_pos_ty pe (str): type of relative position shared_rel_pos (bool): share relative pos across all blocks drop_rate (float): dropout rate proj_drop_rate (float): projection dropout rate attn_drop_rate (float): 
attention dropout rate drop_path_rate (float): stochastic depth rate weight_init (str): weight init scheme embed_layer (nn.Module): patch embedding layer norm_layer: (nn.Module): normalization layer act_layer: (nn.Module): MLP activation layer """ super().__init__() assert global_pool in ('', 'avg', 'token') assert class_token or global_pool != 'token' norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) act_layer = act_layer or nn.GELU self.num_classes = num_classes self.global_pool = global_pool self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.num_prefix_tokens = 1 if class_token else 0 self.grad_checkpointing = False self.patch_embed = embed_layer( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, ) feat_size = self.patch_embed.grid_size rel_pos_args = dict(window_size=feat_size, prefix_tokens=self.num_prefix_tokens) if rel_pos_type.startswith('mlp'): if rel_pos_dim: rel_pos_args['hidden_dim'] = rel_pos_dim if 'swin' in rel_pos_type: rel_pos_args['mode'] = 'swin' rel_pos_cls = partial(RelPosMlp, **rel_pos_args) else: rel_pos_cls = partial(RelPosBias, **rel_pos_args) self.shared_rel_pos = None if shared_rel_pos: self.shared_rel_pos = rel_pos_cls(num_heads=num_heads) # NOTE shared rel pos currently mutually exclusive w/ per-block, but could support both... rel_pos_cls = None self.cls_token = nn.Parameter(torch.zeros(1, self.num_prefix_tokens, embed_dim)) if class_token else None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ block_fn( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_norm=qk_norm, rel_pos_cls=rel_pos_cls, init_values=init_values, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer, ) for i in range(depth)]) self.norm = norm_layer(embed_dim) if not fc_norm else nn.Identity() # Classifier Head self.fc_norm = norm_layer(embed_dim) if fc_norm else nn.Identity() self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() if weight_init != 'skip': self.init_weights(weight_init) def init_weights(self, mode=''): assert mode in ('jax', 'moco', '') if self.cls_token is not None: nn.init.normal_(self.cls_token, std=1e-6) # FIXME weight init scheme using PyTorch defaults curently #named_apply(get_init_weights_vit(mode, head_bias), self) @torch.jit.ignore def no_weight_decay(self): return {'cls_token'} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^cls_token|patch_embed', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes: int, global_pool=None): self.num_classes = num_classes if global_pool is not None: assert global_pool in ('', 'avg', 'token') self.global_pool = global_pool self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) if self.cls_token is not None: x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1) shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x, 
shared_rel_pos=shared_rel_pos) else: x = blk(x, shared_rel_pos=shared_rel_pos) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool: x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0] x = self.fc_norm(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _create_vision_transformer_relpos(variant, pretrained=False, **kwargs): if kwargs.get('features_only', None): raise RuntimeError('features_only not implemented for Vision Transformer models.') model = build_model_with_cfg(VisionTransformerRelPos, variant, pretrained, **kwargs) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'vit_relpos_base_patch32_plus_rpn_256.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_replos_base_patch32_plus_rpn_256-sw-dd486f51.pth', hf_hub_id='timm/', input_size=(3, 256, 256)), 'vit_relpos_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240)), 'vit_relpos_small_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_small_patch16_224-sw-ec2778b4.pth', hf_hub_id='timm/'), 'vit_relpos_medium_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_224-sw-11c174af.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_224-sw-49049aed.pth', hf_hub_id='timm/'), 'vit_srelpos_small_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_small_patch16_224-sw-6cdb8849.pth', hf_hub_id='timm/'), 'vit_srelpos_medium_patch16_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_medium_patch16_224-sw-ad702b8c.pth', hf_hub_id='timm/'), 'vit_relpos_medium_patch16_cls_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_cls_224-sw-cfe8e259.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_cls_224.untrained': _cfg(), 'vit_relpos_base_patch16_clsgap_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_gapcls_224-sw-1a341d6c.pth', hf_hub_id='timm/'), 'vit_relpos_small_patch16_rpn_224.untrained': _cfg(), 'vit_relpos_medium_patch16_rpn_224.sw_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_rpn_224-sw-5d2befd8.pth', hf_hub_id='timm/'), 'vit_relpos_base_patch16_rpn_224.untrained': _cfg(), }) @register_model def vit_relpos_base_patch32_plus_rpn_256(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/32+) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, 
block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch32_plus_rpn_256', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_plus_240(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16+) w/ relative log-coord position, no class token """ model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token """ model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos( 'vit_relpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos( 'vit_relpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_srelpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=False, rel_pos_dim=384, shared_rel_pos=True) model = _create_vision_transformer_relpos( 'vit_srelpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_srelpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, rel_pos_dim=512, shared_rel_pos=True) model = _create_vision_transformer_relpos( 'vit_srelpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-M/16) w/ relative log-coord position, class token present """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False, rel_pos_dim=256, class_token=True, global_pool='token') model = _create_vision_transformer_relpos( 'vit_relpos_medium_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, class token present """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, 
qkv_bias=False, class_token=True, global_pool='token') model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_clsgap_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position, class token present NOTE this config is a bit of a mistake, class token was enabled but global avg-pool w/ fc-norm was not disabled Leaving here for comparisons w/ a future re-train as it performs quite well. """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, class_token=True) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_clsgap_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_small_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict( patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_small_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_medium_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict( patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_medium_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_relpos_base_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos: """ ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token """ model_args = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, block_fn=ResPostRelPosBlock) model = _create_vision_transformer_relpos( 'vit_relpos_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs)) return model
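A minimal usage sketch for the file above: each @register_model entrypoint makes its variant available through timm.create_model. The model name and num_classes below are illustrative choices, not defaults taken from this file.

# Sketch: build one of the relative-position ViT variants registered above and run a forward pass.
# pretrained=False avoids downloading weights; num_classes=10 is an arbitrary example head size.
import torch
import timm

model = timm.create_model('vit_relpos_base_patch16_224', pretrained=False, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # these configs are fixed_input_size at 224x224
print(logits.shape)  # expected: torch.Size([1, 10])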
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/nest.py
""" Nested Transformer (NesT) in PyTorch A PyTorch implement of Aggregating Nested Transformers as described in: 'Aggregating Nested Transformers' - https://arxiv.org/abs/2105.12723 The official Jax code is released and available at https://github.com/google-research/nested-transformer. The weights have been converted with convert/convert_nest_flax.py Acknowledgments: * The paper authors for sharing their research, code, and model weights * Ross Wightman's existing code off which I based this Copyright 2021 Alexander Soare """ import collections.abc import logging import math from functools import partial import torch import torch.nn.functional as F from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import PatchEmbed, Mlp, DropPath, create_classifier, trunc_normal_, _assert from timm.layers import create_conv2d, create_pool2d, to_ntuple, use_fused_attn, LayerNorm from ._builder import build_model_with_cfg from ._features_fx import register_notrace_function from ._manipulate import checkpoint_seq, named_apply from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['Nest'] # model_registry will add each entrypoint fn to this _logger = logging.getLogger(__name__) class Attention(nn.Module): """ This is much like `.vision_transformer.Attention` but uses *localised* self attention by accepting an input with an extra "image block" dim """ fused_attn: torch.jit.Final[bool] def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads self.scale = head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, 3*dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): """ x is shape: B (batch_size), T (image blocks), N (seq length per image block), C (embed dim) """ B, T, N, C = x.shape # result of next line is (qkv, B, num (H)eads, T, N, (C')hannels per head) qkv = self.qkv(x).reshape(B, T, N, 3, self.num_heads, C // self.num_heads).permute(3, 0, 4, 1, 2, 5) q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple) if self.fused_attn: x = F.scaled_dot_product_attention(q, k, v, dropout_p=self.attn_drop.p) else: q = q * self.scale attn = q @ k.transpose(-2, -1) # (B, H, T, N, N) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = attn @ v # (B, H, T, N, C'), permute -> (B, T, N, C', H) x = x.permute(0, 2, 3, 4, 1).reshape(B, T, N, C) x = self.proj(x) x = self.proj_drop(x) return x # (B, T, N, C) class TransformerLayer(nn.Module): """ This is much like `.vision_transformer.Block` but: - Called TransformerLayer here to allow for "block" as defined in the paper ("non-overlapping image blocks") - Uses modified Attention layer that handles the "block" dimension """ def __init__( self, dim, num_heads, mlp_ratio=4., qkv_bias=False, proj_drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, ): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop, ) self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop, ) def forward(self, x): y = self.norm1(x) x = x + self.drop_path(self.attn(y)) x = x + self.drop_path(self.mlp(self.norm2(x))) return x class ConvPool(nn.Module): def __init__(self, in_channels, out_channels, norm_layer, pad_type=''): super().__init__() self.conv = create_conv2d(in_channels, out_channels, kernel_size=3, padding=pad_type, bias=True) self.norm = norm_layer(out_channels) self.pool = create_pool2d('max', kernel_size=3, stride=2, padding=pad_type) def forward(self, x): """ x is expected to have shape (B, C, H, W) """ _assert(x.shape[-2] % 2 == 0, 'BlockAggregation requires even input spatial dims') _assert(x.shape[-1] % 2 == 0, 'BlockAggregation requires even input spatial dims') x = self.conv(x) # Layer norm done over channel dim only x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) x = self.pool(x) return x # (B, C, H//2, W//2) def blockify(x, block_size: int): """image to blocks Args: x (Tensor): with shape (B, H, W, C) block_size (int): edge length of a single square block in units of H, W """ B, H, W, C = x.shape _assert(H % block_size == 0, '`block_size` must divide input height evenly') _assert(W % block_size == 0, '`block_size` must divide input width evenly') grid_height = H // block_size grid_width = W // block_size x = x.reshape(B, grid_height, block_size, grid_width, block_size, C) x = x.transpose(2, 3).reshape(B, grid_height * grid_width, -1, C) return x # (B, T, N, C) @register_notrace_function # reason: int receives Proxy def deblockify(x, block_size: int): """blocks to image Args: x (Tensor): with shape (B, T, N, C) where T is number of blocks and N is sequence size per block block_size (int): edge length of a single square block in units of desired H, W """ B, T, _, C = x.shape grid_size = int(math.sqrt(T)) height = width = grid_size * block_size x = x.reshape(B, grid_size, grid_size, block_size, block_size, C) x = x.transpose(2, 3).reshape(B, height, width, C) return x # (B, H, W, C) class NestLevel(nn.Module): """ Single hierarchical level of a Nested Transformer """ def __init__( self, num_blocks, block_size, seq_length, num_heads, depth, embed_dim, prev_embed_dim=None, mlp_ratio=4., qkv_bias=True, proj_drop=0., attn_drop=0., drop_path=[], norm_layer=None, act_layer=None, pad_type='', ): super().__init__() self.block_size = block_size self.grad_checkpointing = False self.pos_embed = nn.Parameter(torch.zeros(1, num_blocks, seq_length, embed_dim)) if prev_embed_dim is not None: self.pool = ConvPool(prev_embed_dim, embed_dim, norm_layer=norm_layer, pad_type=pad_type) else: self.pool = nn.Identity() # Transformer encoder if len(drop_path): assert len(drop_path) == depth, 'Must provide as many drop path rates as there are transformer layers' self.transformer_encoder = nn.Sequential(*[ TransformerLayer( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop, attn_drop=attn_drop, drop_path=drop_path[i], norm_layer=norm_layer, act_layer=act_layer, ) for i in range(depth)]) def forward(self, x): """ expects x as (B, C, H, W) """ x = self.pool(x) x = x.permute(0, 2, 3, 1) # (B, H', W', C), switch to channels last for transformer x = blockify(x, self.block_size) # (B, T, N, C') x = x + self.pos_embed if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.transformer_encoder, x) else: x = 
self.transformer_encoder(x) # (B, T, N, C') x = deblockify(x, self.block_size) # (B, H', W', C') # Channel-first for block aggregation, and generally to replicate convnet feature map at each stage return x.permute(0, 3, 1, 2) # (B, C, H', W') class Nest(nn.Module): """ Nested Transformer (NesT) A PyTorch impl of : `Aggregating Nested Transformers` - https://arxiv.org/abs/2105.12723 """ def __init__( self, img_size=224, in_chans=3, patch_size=4, num_levels=3, embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), num_classes=1000, mlp_ratio=4., qkv_bias=True, drop_rate=0., proj_drop_rate=0., attn_drop_rate=0., drop_path_rate=0.5, norm_layer=None, act_layer=None, pad_type='', weight_init='', global_pool='avg', ): """ Args: img_size (int, tuple): input image size in_chans (int): number of input channels patch_size (int): patch size num_levels (int): number of block hierarchies (T_d in the paper) embed_dims (int, tuple): embedding dimensions of each level num_heads (int, tuple): number of attention heads for each level depths (int, tuple): number of transformer layers for each level num_classes (int): number of classes for classification head mlp_ratio (int): ratio of mlp hidden dim to embedding dim for MLP of transformer layers qkv_bias (bool): enable bias for qkv if True drop_rate (float): dropout rate for MLP of transformer layers, MSA final projection layer, and classifier attn_drop_rate (float): attention dropout rate drop_path_rate (float): stochastic depth rate norm_layer: (nn.Module): normalization layer for transformer layers act_layer: (nn.Module): activation layer in MLP of transformer layers pad_type: str: Type of padding to use '' for PyTorch symmetric, 'same' for TF SAME weight_init: (str): weight init scheme global_pool: (str): type of pooling operation to apply to final feature map Notes: - Default values follow NesT-B from the original Jax code. - `embed_dims`, `num_heads`, `depths` should be ints or tuples with length `num_levels`. - For those following the paper, Table A1 may have errors! - https://github.com/google-research/nested-transformer/issues/2 """ super().__init__() for param_name in ['embed_dims', 'num_heads', 'depths']: param_value = locals()[param_name] if isinstance(param_value, collections.abc.Sequence): assert len(param_value) == num_levels, f'Require `len({param_name}) == num_levels`' embed_dims = to_ntuple(num_levels)(embed_dims) num_heads = to_ntuple(num_levels)(num_heads) depths = to_ntuple(num_levels)(depths) self.num_classes = num_classes self.num_features = embed_dims[-1] self.feature_info = [] norm_layer = norm_layer or LayerNorm act_layer = act_layer or nn.GELU self.drop_rate = drop_rate self.num_levels = num_levels if isinstance(img_size, collections.abc.Sequence): assert img_size[0] == img_size[1], 'Model only handles square inputs' img_size = img_size[0] assert img_size % patch_size == 0, '`patch_size` must divide `img_size` evenly' self.patch_size = patch_size # Number of blocks at each level self.num_blocks = (4 ** torch.arange(num_levels)).flip(0).tolist() assert (img_size // patch_size) % math.sqrt(self.num_blocks[0]) == 0, \ 'First level blocks don\'t fit evenly. Check `img_size`, `patch_size`, and `num_levels`' # Block edge size in units of patches # Hint: (img_size // patch_size) gives number of patches along edge of image. 
sqrt(self.num_blocks[0]) is the # number of blocks along edge of image self.block_size = int((img_size // patch_size) // math.sqrt(self.num_blocks[0])) # Patch embedding self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dims[0], flatten=False, ) self.num_patches = self.patch_embed.num_patches self.seq_length = self.num_patches // self.num_blocks[0] # Build up each hierarchical level levels = [] dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] prev_dim = None curr_stride = 4 for i in range(len(self.num_blocks)): dim = embed_dims[i] levels.append(NestLevel( self.num_blocks[i], self.block_size, self.seq_length, num_heads[i], depths[i], dim, prev_dim, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, proj_drop=proj_drop_rate, attn_drop=attn_drop_rate, drop_path=dp_rates[i], norm_layer=norm_layer, act_layer=act_layer, pad_type=pad_type, )) self.feature_info += [dict(num_chs=dim, reduction=curr_stride, module=f'levels.{i}')] prev_dim = dim curr_stride *= 2 self.levels = nn.Sequential(*levels) # Final normalization layer self.norm = norm_layer(embed_dims[-1]) # Classifier global_pool, head = create_classifier(self.num_features, self.num_classes, pool_type=global_pool) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.head = head self.init_weights(weight_init) @torch.jit.ignore def init_weights(self, mode=''): assert mode in ('nlhb', '') head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0. for level in self.levels: trunc_normal_(level.pos_embed, std=.02, a=-2, b=2) named_apply(partial(_init_nest_weights, head_bias=head_bias), self) @torch.jit.ignore def no_weight_decay(self): return {f'level.{i}.pos_embed' for i in range(len(self.levels))} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^patch_embed', # stem and embed blocks=[ (r'^levels\.(\d+)' if coarse else r'^levels\.(\d+)\.transformer_encoder\.(\d+)', None), (r'^levels\.(\d+)\.(?:pool|pos_embed)', (0,)), (r'^norm', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for l in self.levels: l.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.head = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): x = self.patch_embed(x) x = self.levels(x) # Layer norm done over channel dim only (to NHWC and back) x = self.norm(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) return x def forward_head(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_nest_weights(module: nn.Module, name: str = '', head_bias: float = 0.): """ NesT weight initialization Can replicate Jax implementation. 
Otherwise follows vision_transformer.py """ if isinstance(module, nn.Linear): if name.startswith('head'): trunc_normal_(module.weight, std=.02, a=-2, b=2) nn.init.constant_(module.bias, head_bias) else: trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02, a=-2, b=2) if module.bias is not None: nn.init.zeros_(module.bias) def resize_pos_embed(posemb, posemb_new): """ Rescale the grid of position embeddings when loading from state_dict Expected shape of position embeddings is (1, T, N, C), and considers only square images """ _logger.info('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape) seq_length_old = posemb.shape[2] num_blocks_new, seq_length_new = posemb_new.shape[1:3] size_new = int(math.sqrt(num_blocks_new*seq_length_new)) # First change to (1, C, H, W) posemb = deblockify(posemb, int(math.sqrt(seq_length_old))).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=[size_new, size_new], mode='bicubic', align_corners=False) # Now change to new (1, T, N, C) posemb = blockify(posemb.permute(0, 2, 3, 1), int(math.sqrt(seq_length_new))) return posemb def checkpoint_filter_fn(state_dict, model): """ resize positional embeddings of pretrained weights """ pos_embed_keys = [k for k in state_dict.keys() if k.startswith('pos_embed_')] for k in pos_embed_keys: if state_dict[k].shape != getattr(model, k).shape: state_dict[k] = resize_pos_embed(state_dict[k], getattr(model, k)) return state_dict def _create_nest(variant, pretrained=False, **kwargs): model = build_model_with_cfg( Nest, variant, pretrained, feature_cfg=dict(out_indices=(0, 1, 2), flatten_sequential=True), pretrained_filter_fn=checkpoint_filter_fn, **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': [14, 14], 'crop_pct': .875, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ 'nest_base.untrained': _cfg(), 'nest_small.untrained': _cfg(), 'nest_tiny.untrained': _cfg(), # (weights from official Google JAX impl, require 'SAME' padding) 'nest_base_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_small_jx.goog_in1k': _cfg(hf_hub_id='timm/'), 'nest_tiny_jx.goog_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def nest_base(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_base', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny', pretrained=pretrained, **model_kwargs) return model @register_model def nest_base_jx(pretrained=False, **kwargs) -> Nest: """ Nest-B @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict( embed_dims=(128, 256, 512), num_heads=(4, 8, 16), depths=(2, 2, 20), **kwargs) model = 
_create_nest('nest_base_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_small_jx(pretrained=False, **kwargs) -> Nest: """ Nest-S @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 20), **kwargs) model = _create_nest('nest_small_jx', pretrained=pretrained, **model_kwargs) return model @register_model def nest_tiny_jx(pretrained=False, **kwargs) -> Nest: """ Nest-T @ 224x224 """ kwargs.setdefault('pad_type', 'same') model_kwargs = dict(embed_dims=(96, 192, 384), num_heads=(3, 6, 12), depths=(2, 2, 8), **kwargs) model = _create_nest('nest_tiny_jx', pretrained=pretrained, **model_kwargs) return model register_model_deprecations(__name__, { 'jx_nest_base': 'nest_base_jx', 'jx_nest_small': 'nest_small_jx', 'jx_nest_tiny': 'nest_tiny_jx', })
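The blockify/deblockify helpers above are the core of NesT's block-wise attention: a channels-last feature map is split into non-overlapping square blocks for local attention, then stitched back. A small round-trip sketch, assuming these functions are importable from timm.models.nest as defined above:

# blockify/deblockify are pure reshapes and transposes, so the round trip is lossless.
import torch
from timm.models.nest import blockify, deblockify

x = torch.randn(2, 8, 8, 128)                # (B, H, W, C), channels-last
blocks = blockify(x, block_size=4)           # (B, T, N, C) = (2, 4, 16, 128): four 4x4 blocks
restored = deblockify(blocks, block_size=4)  # back to (2, 8, 8, 128)
assert blocks.shape == (2, 4, 16, 128)
assert torch.equal(restored, x)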
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/focalnet.py
""" FocalNet As described in `Focal Modulation Networks` - https://arxiv.org/abs/2203.11926 Significant modifications and refactoring from the original impl at https://github.com/microsoft/FocalNet This impl is/has: * fully convolutional, NCHW tensor layout throughout, seemed to have minimal performance impact but more flexible * re-ordered downsample / layer so that striding always at beginning of layer (stage) * no input size constraints or input resolution/H/W tracking through the model * torchscript fixed and a number of quirks cleaned up * feature extraction support via `features_only=True` """ # -------------------------------------------------------- # FocalNets -- Focal Modulation Networks # Copyright (c) 2022 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Jianwei Yang (jianwyan@microsoft.com) # -------------------------------------------------------- from functools import partial from typing import Callable, Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import Mlp, DropPath, LayerNorm2d, trunc_normal_, ClassifierHead, NormMlpClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply from ._registry import generate_default_cfgs, register_model __all__ = ['FocalNet'] class FocalModulation(nn.Module): def __init__( self, dim: int, focal_window, focal_level: int, focal_factor: int = 2, bias: bool = True, use_post_norm: bool = False, normalize_modulator: bool = False, proj_drop: float = 0., norm_layer: Callable = LayerNorm2d, ): super().__init__() self.dim = dim self.focal_window = focal_window self.focal_level = focal_level self.focal_factor = focal_factor self.use_post_norm = use_post_norm self.normalize_modulator = normalize_modulator self.input_split = [dim, dim, self.focal_level + 1] self.f = nn.Conv2d(dim, 2 * dim + (self.focal_level + 1), kernel_size=1, bias=bias) self.h = nn.Conv2d(dim, dim, kernel_size=1, bias=bias) self.act = nn.GELU() self.proj = nn.Conv2d(dim, dim, kernel_size=1) self.proj_drop = nn.Dropout(proj_drop) self.focal_layers = nn.ModuleList() self.kernel_sizes = [] for k in range(self.focal_level): kernel_size = self.focal_factor * k + self.focal_window self.focal_layers.append(nn.Sequential( nn.Conv2d(dim, dim, kernel_size=kernel_size, groups=dim, padding=kernel_size // 2, bias=False), nn.GELU(), )) self.kernel_sizes.append(kernel_size) self.norm = norm_layer(dim) if self.use_post_norm else nn.Identity() def forward(self, x): # pre linear projection x = self.f(x) q, ctx, gates = torch.split(x, self.input_split, 1) # context aggreation ctx_all = 0 for l, focal_layer in enumerate(self.focal_layers): ctx = focal_layer(ctx) ctx_all = ctx_all + ctx * gates[:, l:l + 1] ctx_global = self.act(ctx.mean((2, 3), keepdim=True)) ctx_all = ctx_all + ctx_global * gates[:, self.focal_level:] # normalize context if self.normalize_modulator: ctx_all = ctx_all / (self.focal_level + 1) # focal modulation x_out = q * self.h(ctx_all) x_out = self.norm(x_out) # post linear projection x_out = self.proj(x_out) x_out = self.proj_drop(x_out) return x_out class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class FocalNetBlock(nn.Module): """ Focal 
Modulation Network Block. """ def __init__( self, dim: int, mlp_ratio: float = 4., focal_level: int = 1, focal_window: int = 3, use_post_norm: bool = False, use_post_norm_in_modulation: bool = False, normalize_modulator: bool = False, layerscale_value: float = 1e-4, proj_drop: float = 0., drop_path: float = 0., act_layer: Callable = nn.GELU, norm_layer: Callable = LayerNorm2d, ): """ Args: dim: Number of input channels. mlp_ratio: Ratio of mlp hidden dim to embedding dim. focal_level: Number of focal levels. focal_window: Focal window size at first focal level. use_post_norm: Whether to use layer norm after modulation. use_post_norm_in_modulation: Whether to use layer norm in modulation. layerscale_value: Initial layerscale value. proj_drop: Dropout rate. drop_path: Stochastic depth rate. act_layer: Activation layer. norm_layer: Normalization layer. """ super().__init__() self.dim = dim self.mlp_ratio = mlp_ratio self.focal_window = focal_window self.focal_level = focal_level self.use_post_norm = use_post_norm self.norm1 = norm_layer(dim) if not use_post_norm else nn.Identity() self.modulation = FocalModulation( dim, focal_window=focal_window, focal_level=self.focal_level, use_post_norm=use_post_norm_in_modulation, normalize_modulator=normalize_modulator, proj_drop=proj_drop, norm_layer=norm_layer, ) self.norm1_post = norm_layer(dim) if use_post_norm else nn.Identity() self.ls1 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) if not use_post_norm else nn.Identity() self.mlp = Mlp( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop, use_conv=True, ) self.norm2_post = norm_layer(dim) if use_post_norm else nn.Identity() self.ls2 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity() def forward(self, x): shortcut = x # Focal Modulation x = self.norm1(x) x = self.modulation(x) x = self.norm1_post(x) x = shortcut + self.drop_path1(self.ls1(x)) # FFN x = x + self.drop_path2(self.ls2(self.norm2_post(self.mlp(self.norm2(x))))) return x class FocalNetStage(nn.Module): """ A basic Focal Transformer layer for one stage. """ def __init__( self, dim: int, out_dim: int, depth: int, mlp_ratio: float = 4., downsample: bool = True, focal_level: int = 1, focal_window: int = 1, use_overlap_down: bool = False, use_post_norm: bool = False, use_post_norm_in_modulation: bool = False, normalize_modulator: bool = False, layerscale_value: float = 1e-4, proj_drop: float = 0., drop_path: float = 0., norm_layer: Callable = LayerNorm2d, ): """ Args: dim: Number of input channels. out_dim: Number of output channels. depth: Number of blocks. mlp_ratio: Ratio of mlp hidden dim to embedding dim. downsample: Downsample layer at start of the layer. focal_level: Number of focal levels focal_window: Focal window size at first focal level use_overlap_down: User overlapped convolution in downsample layer. use_post_norm: Whether to use layer norm after modulation. use_post_norm_in_modulation: Whether to use layer norm in modulation. layerscale_value: Initial layerscale value proj_drop: Dropout rate for projections. drop_path: Stochastic depth rate. norm_layer: Normalization layer. 
""" super().__init__() self.dim = dim self.depth = depth self.grad_checkpointing = False if downsample: self.downsample = Downsample( in_chs=dim, out_chs=out_dim, stride=2, overlap=use_overlap_down, norm_layer=norm_layer, ) else: self.downsample = nn.Identity() # build blocks self.blocks = nn.ModuleList([ FocalNetBlock( dim=out_dim, mlp_ratio=mlp_ratio, focal_level=focal_level, focal_window=focal_window, use_post_norm=use_post_norm, use_post_norm_in_modulation=use_post_norm_in_modulation, normalize_modulator=normalize_modulator, layerscale_value=layerscale_value, proj_drop=proj_drop, drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer, ) for i in range(depth)]) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable def forward(self, x): x = self.downsample(x) for blk in self.blocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint.checkpoint(blk, x) else: x = blk(x) return x class Downsample(nn.Module): def __init__( self, in_chs: int, out_chs: int, stride: int = 4, overlap: bool = False, norm_layer: Optional[Callable] = None, ): """ Args: in_chs: Number of input image channels. out_chs: Number of linear projection output channels. stride: Downsample stride. overlap: Use overlapping convolutions if True. norm_layer: Normalization layer. """ super().__init__() self.stride = stride padding = 0 kernel_size = stride if overlap: assert stride in (2, 4) if stride == 4: kernel_size, padding = 7, 2 elif stride == 2: kernel_size, padding = 3, 1 self.proj = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding) self.norm = norm_layer(out_chs) if norm_layer is not None else nn.Identity() def forward(self, x): x = self.proj(x) x = self.norm(x) return x class FocalNet(nn.Module): """" Focal Modulation Networks (FocalNets) """ def __init__( self, in_chans: int = 3, num_classes: int = 1000, global_pool: str = 'avg', embed_dim: int = 96, depths: Tuple[int, ...] = (2, 2, 6, 2), mlp_ratio: float = 4., focal_levels: Tuple[int, ...] = (2, 2, 2, 2), focal_windows: Tuple[int, ...] = (3, 3, 3, 3), use_overlap_down: bool = False, use_post_norm: bool = False, use_post_norm_in_modulation: bool = False, normalize_modulator: bool = False, head_hidden_size: Optional[int] = None, head_init_scale: float = 1.0, layerscale_value: Optional[float] = None, drop_rate: bool = 0., proj_drop_rate: bool = 0., drop_path_rate: bool = 0.1, norm_layer: Callable = partial(LayerNorm2d, eps=1e-5), ): """ Args: in_chans: Number of input image channels. num_classes: Number of classes for classification head. embed_dim: Patch embedding dimension. depths: Depth of each Focal Transformer layer. mlp_ratio: Ratio of mlp hidden dim to embedding dim. focal_levels: How many focal levels at all stages. Note that this excludes the finest-grain level. focal_windows: The focal window size at all stages. use_overlap_down: Whether to use convolutional embedding. use_post_norm: Whether to use layernorm after modulation (it helps stablize training of large models) layerscale_value: Value for layer scale. drop_rate: Dropout rate. drop_path_rate: Stochastic depth rate. norm_layer: Normalization layer. 
""" super().__init__() self.num_layers = len(depths) embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)] self.num_classes = num_classes self.embed_dim = embed_dim self.num_features = embed_dim[-1] self.feature_info = [] self.stem = Downsample( in_chs=in_chans, out_chs=embed_dim[0], overlap=use_overlap_down, norm_layer=norm_layer, ) in_dim = embed_dim[0] dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule layers = [] for i_layer in range(self.num_layers): out_dim = embed_dim[i_layer] layer = FocalNetStage( dim=in_dim, out_dim=out_dim, depth=depths[i_layer], mlp_ratio=mlp_ratio, downsample=i_layer > 0, focal_level=focal_levels[i_layer], focal_window=focal_windows[i_layer], use_overlap_down=use_overlap_down, use_post_norm=use_post_norm, use_post_norm_in_modulation=use_post_norm_in_modulation, normalize_modulator=normalize_modulator, layerscale_value=layerscale_value, proj_drop=proj_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], norm_layer=norm_layer, ) in_dim = out_dim layers += [layer] self.feature_info += [dict(num_chs=out_dim, reduction=4 * 2 ** i_layer, module=f'layers.{i_layer}')] self.layers = nn.Sequential(*layers) if head_hidden_size: self.norm = nn.Identity() self.head = NormMlpClassifierHead( self.num_features, num_classes, hidden_size=head_hidden_size, pool_type=global_pool, drop_rate=drop_rate, norm_layer=norm_layer, ) else: self.norm = norm_layer(self.num_features) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate ) named_apply(partial(_init_weights, head_init_scale=head_init_scale), self) @torch.jit.ignore def no_weight_decay(self): return {''} @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=[ (r'^layers\.(\d+)', None), (r'^norm', (99999,)) ] if coarse else [ (r'^layers\.(\d+).downsample', (0,)), (r'^layers\.(\d+)\.\w+\.(\d+)', None), (r'^norm', (99999,)), ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable for l in self.layers: l.set_grad_checkpointing(enable=enable) @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.layers(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module, name=None, head_init_scale=1.0): if isinstance(module, nn.Conv2d): trunc_normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Linear): trunc_normal_(module.weight, std=.02) if module.bias is not None: nn.init.zeros_(module.bias) if name and 'head.fc' in name: module.weight.data.mul_(head_init_scale) module.bias.data.mul_(head_init_scale) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.proj', 'classifier': 'head.fc', 'license': 'mit', **kwargs } default_cfgs = generate_default_cfgs({ "focalnet_tiny_srf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_small_srf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_base_srf.ms_in1k": _cfg( hf_hub_id='timm/'), 
"focalnet_tiny_lrf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_small_lrf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_base_lrf.ms_in1k": _cfg( hf_hub_id='timm/'), "focalnet_large_fl3.ms_in22k": _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), "focalnet_large_fl4.ms_in22k": _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), "focalnet_xlarge_fl3.ms_in22k": _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), "focalnet_xlarge_fl4.ms_in22k": _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842), "focalnet_huge_fl3.ms_in22k": _cfg( hf_hub_id='timm/', num_classes=21842), "focalnet_huge_fl4.ms_in22k": _cfg( hf_hub_id='timm/', num_classes=0), }) def checkpoint_filter_fn(state_dict, model: FocalNet): state_dict = state_dict.get('model', state_dict) if 'stem.proj.weight' in state_dict: return state_dict import re out_dict = {} dest_dict = model.state_dict() for k, v in state_dict.items(): k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k) k = k.replace('patch_embed', 'stem') k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k) if 'norm' in k and k not in dest_dict: k = re.sub(r'norm([0-9])', r'norm\1_post', k) k = k.replace('ln.', 'norm.') k = k.replace('head', 'head.fc') if k in dest_dict and dest_dict[k].numel() == v.numel() and dest_dict[k].shape != v.shape: v = v.reshape(dest_dict[k].shape) out_dict[k] = v return out_dict def _create_focalnet(variant, pretrained=False, **kwargs): default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1)))) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( FocalNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def focalnet_tiny_srf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, **kwargs) return _create_focalnet('focalnet_tiny_srf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_small_srf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, **kwargs) return _create_focalnet('focalnet_small_srf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_base_srf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, **kwargs) return _create_focalnet('focalnet_base_srf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_tiny_lrf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs) return _create_focalnet('focalnet_tiny_lrf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_small_lrf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs) return _create_focalnet('focalnet_small_lrf', pretrained=pretrained, **model_kwargs) @register_model def focalnet_base_lrf(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, focal_levels=[3, 3, 3, 3], **kwargs) return _create_focalnet('focalnet_base_lrf', pretrained=pretrained, **model_kwargs) # FocalNet large+ models @register_model def focalnet_large_fl3(pretrained=False, **kwargs) -> 
FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4, use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_large_fl3', pretrained=pretrained, **model_kwargs) @register_model def focalnet_large_fl4(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[4, 4, 4, 4], use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_large_fl4', pretrained=pretrained, **model_kwargs) @register_model def focalnet_xlarge_fl3(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4, use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_xlarge_fl3', pretrained=pretrained, **model_kwargs) @register_model def focalnet_xlarge_fl4(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[4, 4, 4, 4], use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_xlarge_fl4', pretrained=pretrained, **model_kwargs) @register_model def focalnet_huge_fl3(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[3, 3, 3, 3], focal_windows=[3] * 4, use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_huge_fl3', pretrained=pretrained, **model_kwargs) @register_model def focalnet_huge_fl4(pretrained=False, **kwargs) -> FocalNet: model_kwargs = dict( depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[4, 4, 4, 4], use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs) return _create_focalnet('focalnet_huge_fl4', pretrained=pretrained, **model_kwargs)
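As the module docstring notes, this FocalNet port keeps an NCHW layout throughout and supports feature extraction via features_only=True. A hedged sketch of that path through the timm factory; the model name is one of the entrypoints registered above, and the comment assumes its default 4-stage config:

# Sketch: pull pyramid feature maps from a FocalNet backbone.
import torch
import timm

backbone = timm.create_model('focalnet_tiny_srf', pretrained=False, features_only=True)
backbone.eval()
with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))
for f in feats:
    print(f.shape)  # NCHW maps at strides 4/8/16/32, matching the feature_info entries above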
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/repvit.py
""" RepViT Paper: `RepViT: Revisiting Mobile CNN From ViT Perspective` - https://arxiv.org/abs/2307.09283 @misc{wang2023repvit, title={RepViT: Revisiting Mobile CNN From ViT Perspective}, author={Ao Wang and Hui Chen and Zijia Lin and Hengjun Pu and Guiguang Ding}, year={2023}, eprint={2307.09283}, archivePrefix={arXiv}, primaryClass={cs.CV} } Adapted from official impl at https://github.com/jameslahm/RepViT """ __all__ = ['RepViT'] import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._registry import register_model, generate_default_cfgs from ._builder import build_model_with_cfg from timm.layers import SqueezeExcite, trunc_normal_, to_ntuple, to_2tuple from ._manipulate import checkpoint_seq import torch class ConvNorm(nn.Sequential): def __init__(self, in_dim, out_dim, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1): super().__init__() self.add_module('c', nn.Conv2d(in_dim, out_dim, ks, stride, pad, dilation, groups, bias=False)) self.add_module('bn', nn.BatchNorm2d(out_dim)) nn.init.constant_(self.bn.weight, bn_weight_init) nn.init.constant_(self.bn.bias, 0) @torch.no_grad() def fuse(self): c, bn = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 w = c.weight * w[:, None, None, None] b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5 m = nn.Conv2d( w.size(1) * self.c.groups, w.size(0), w.shape[2:], stride=self.c.stride, padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups, device=c.weight.device, ) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class NormLinear(nn.Sequential): def __init__(self, in_dim, out_dim, bias=True, std=0.02): super().__init__() self.add_module('bn', nn.BatchNorm1d(in_dim)) self.add_module('l', nn.Linear(in_dim, out_dim, bias=bias)) trunc_normal_(self.l.weight, std=std) if bias: nn.init.constant_(self.l.bias, 0) @torch.no_grad() def fuse(self): bn, l = self._modules.values() w = bn.weight / (bn.running_var + bn.eps) ** 0.5 b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5 w = l.weight * w[None, :] if l.bias is None: b = b @ self.l.weight.T else: b = (l.weight @ b[:, None]).view(-1) + self.l.bias m = nn.Linear(w.size(1), w.size(0), device=l.weight.device) m.weight.data.copy_(w) m.bias.data.copy_(b) return m class RepVGGDW(nn.Module): def __init__(self, ed, kernel_size): super().__init__() self.conv = ConvNorm(ed, ed, kernel_size, 1, (kernel_size - 1) // 2, groups=ed) self.conv1 = ConvNorm(ed, ed, 1, 1, 0, groups=ed) self.dim = ed def forward(self, x): return self.conv(x) + self.conv1(x) + x @torch.no_grad() def fuse(self): conv = self.conv.fuse() conv1 = self.conv1.fuse() conv_w = conv.weight conv_b = conv.bias conv1_w = conv1.weight conv1_b = conv1.bias conv1_w = nn.functional.pad(conv1_w, [1, 1, 1, 1]) identity = nn.functional.pad( torch.ones(conv1_w.shape[0], conv1_w.shape[1], 1, 1, device=conv1_w.device), [1, 1, 1, 1] ) final_conv_w = conv_w + conv1_w + identity final_conv_b = conv_b + conv1_b conv.weight.data.copy_(final_conv_w) conv.bias.data.copy_(final_conv_b) return conv class RepViTMlp(nn.Module): def __init__(self, in_dim, hidden_dim, act_layer): super().__init__() self.conv1 = ConvNorm(in_dim, hidden_dim, 1, 1, 0) self.act = act_layer() self.conv2 = ConvNorm(hidden_dim, in_dim, 1, 1, 0, bn_weight_init=0) def forward(self, x): return self.conv2(self.act(self.conv1(x))) class RepViTBlock(nn.Module): def __init__(self, in_dim, mlp_ratio, kernel_size, use_se, act_layer): super(RepViTBlock, 
self).__init__() self.token_mixer = RepVGGDW(in_dim, kernel_size) self.se = SqueezeExcite(in_dim, 0.25) if use_se else nn.Identity() self.channel_mixer = RepViTMlp(in_dim, in_dim * mlp_ratio, act_layer) def forward(self, x): x = self.token_mixer(x) x = self.se(x) identity = x x = self.channel_mixer(x) return identity + x class RepViTStem(nn.Module): def __init__(self, in_chs, out_chs, act_layer): super().__init__() self.conv1 = ConvNorm(in_chs, out_chs // 2, 3, 2, 1) self.act1 = act_layer() self.conv2 = ConvNorm(out_chs // 2, out_chs, 3, 2, 1) self.stride = 4 def forward(self, x): return self.conv2(self.act1(self.conv1(x))) class RepViTDownsample(nn.Module): def __init__(self, in_dim, mlp_ratio, out_dim, kernel_size, act_layer): super().__init__() self.pre_block = RepViTBlock(in_dim, mlp_ratio, kernel_size, use_se=False, act_layer=act_layer) self.spatial_downsample = ConvNorm(in_dim, in_dim, kernel_size, 2, (kernel_size - 1) // 2, groups=in_dim) self.channel_downsample = ConvNorm(in_dim, out_dim, 1, 1) self.ffn = RepViTMlp(out_dim, out_dim * mlp_ratio, act_layer) def forward(self, x): x = self.pre_block(x) x = self.spatial_downsample(x) x = self.channel_downsample(x) identity = x x = self.ffn(x) return x + identity class RepViTClassifier(nn.Module): def __init__(self, dim, num_classes, distillation=False): super().__init__() self.head = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity() self.distillation = distillation if distillation: self.head_dist = NormLinear(dim, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x): if self.distillation: x1, x2 = self.head(x), self.head_dist(x) if (not self.training) or torch.jit.is_scripting(): return (x1 + x2) / 2 else: return x1, x2 else: x = self.head(x) return x @torch.no_grad() def fuse(self): if not self.num_classes > 0: return nn.Identity() head = self.head.fuse() if self.distillation: head_dist = self.head_dist.fuse() head.weight += head_dist.weight head.bias += head_dist.bias head.weight /= 2 head.bias /= 2 return head else: return head class RepViTStage(nn.Module): def __init__(self, in_dim, out_dim, depth, mlp_ratio, act_layer, kernel_size=3, downsample=True): super().__init__() if downsample: self.downsample = RepViTDownsample(in_dim, mlp_ratio, out_dim, kernel_size, act_layer) else: assert in_dim == out_dim self.downsample = nn.Identity() blocks = [] use_se = True for _ in range(depth): blocks.append(RepViTBlock(out_dim, mlp_ratio, kernel_size, use_se, act_layer)) use_se = not use_se self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class RepViT(nn.Module): def __init__( self, in_chans=3, img_size=224, embed_dim=(48,), depth=(2,), mlp_ratio=2, global_pool='avg', kernel_size=3, num_classes=1000, act_layer=nn.GELU, distillation=True, ): super(RepViT, self).__init__() self.grad_checkpointing = False self.global_pool = global_pool self.embed_dim = embed_dim self.num_classes = num_classes in_dim = embed_dim[0] self.stem = RepViTStem(in_chans, in_dim, act_layer) stride = self.stem.stride resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))]) num_stages = len(embed_dim) mlp_ratios = to_ntuple(num_stages)(mlp_ratio) self.feature_info = [] stages = [] for i in range(num_stages): downsample = True if i != 0 else False stages.append( RepViTStage( in_dim, embed_dim[i], depth[i], mlp_ratio=mlp_ratios[i], act_layer=act_layer, kernel_size=kernel_size, downsample=downsample, ) ) stage_stride = 2 if downsample else 1 stride *= 
stage_stride resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution]) self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')] in_dim = embed_dim[i] self.stages = nn.Sequential(*stages) self.num_features = embed_dim[-1] self.head = RepViTClassifier(embed_dim[-1], num_classes, distillation) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', # stem and embed blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None, distillation=False): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = ( RepViTClassifier(self.embed_dim[-1], num_classes, distillation) if num_classes > 0 else nn.Identity() ) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x) else: x = self.stages(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = nn.functional.adaptive_avg_pool2d(x, 1).flatten(1) return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x @torch.no_grad() def fuse(self): def fuse_children(net): for child_name, child in net.named_children(): if hasattr(child, 'fuse'): fused = child.fuse() setattr(net, child_name, fused) fuse_children(fused) else: fuse_children(child) fuse_children(self) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.c', 'classifier': ('head.head.l', 'head.head_dist.l'), **kwargs, } default_cfgs = generate_default_cfgs( { 'repvit_m1.dist_in1k': _cfg( url='https://github.com/THU-MIG/RepViT/releases/download/v1.0/repvit_m1_distill_300_timm.pth' ), 'repvit_m2.dist_in1k': _cfg( url='https://github.com/THU-MIG/RepViT/releases/download/v1.0/repvit_m2_distill_300_timm.pth' ), 'repvit_m3.dist_in1k': _cfg( url='https://github.com/THU-MIG/RepViT/releases/download/v1.0/repvit_m3_distill_300_timm.pth' ), } ) def _create_repvit(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( RepViT, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs ) return model @register_model def repvit_m1(pretrained=False, **kwargs): """ Constructs a RepViT-M1 model """ model_args = dict(embed_dim=(48, 96, 192, 384), depth=(2, 2, 14, 2)) return _create_repvit('repvit_m1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m2(pretrained=False, **kwargs): """ Constructs a RepViT-M2 model """ model_args = dict(embed_dim=(64, 128, 256, 512), depth=(2, 2, 12, 2)) return _create_repvit('repvit_m2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def repvit_m3(pretrained=False, **kwargs): """ Constructs a RepViT-M3 model """ model_args = dict(embed_dim=(64, 128, 256, 512), depth=(4, 4, 18, 2)) return _create_repvit('repvit_m3', pretrained=pretrained, **dict(model_args, **kwargs))
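# Usage sketch: RepViT is assembled from ConvNorm / NormLinear blocks that expose a
# fuse() method for inference-time reparameterization. The snippet below assumes the
# 'repvit_m1' entrypoint registered above and timm.create_model; the 224x224 input
# size is an assumption.
import torch
import timm
from timm.models.repvit import ConvNorm

model = timm.create_model('repvit_m1', pretrained=False).eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # distillation heads are averaged in eval mode
print(logits.shape)  # torch.Size([1, 1000])

# Fusing a single ConvNorm folds its BatchNorm statistics into the convolution weights.
cn = ConvNorm(8, 16, ks=3, pad=1).eval()
fused = cn.fuse()
x = torch.randn(1, 8, 14, 14)
print(torch.allclose(cn(x), fused(x), atol=1e-5))  # True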
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/hrnet.py
""" HRNet Copied from https://github.com/HRNet/HRNet-Image-Classification Original header: Copyright (c) Microsoft Licensed under the MIT License. Written by Bin Xiao (Bin.Xiao@microsoft.com) Modified by Ke Sun (sunk@mail.ustc.edu.cn) """ import logging from typing import List import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_classifier from ._builder import build_model_with_cfg, pretrained_cfg_for_features from ._features import FeatureInfo from ._registry import register_model, generate_default_cfgs from .resnet import BasicBlock, Bottleneck # leveraging ResNet block_types w/ additional features like SE __all__ = ['HighResolutionNet', 'HighResolutionNetFeatures'] # model_registry will add each entrypoint fn to this _BN_MOMENTUM = 0.1 _logger = logging.getLogger(__name__) cfg_cls = dict( hrnet_w18_small=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(1,), num_channels=(32,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(16, 32), fuse_method='SUM' ), stage3=dict( num_modules=1, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(16, 32, 64), fuse_method='SUM' ), stage4=dict( num_modules=1, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(16, 32, 64, 128), fuse_method='SUM', ), ), hrnet_w18_small_v2=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(2,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(2, 2), num_channels=(18, 36), fuse_method='SUM' ), stage3=dict( num_modules=3, num_branches=3, block_type='BASIC', num_blocks=(2, 2, 2), num_channels=(18, 36, 72), fuse_method='SUM' ), stage4=dict( num_modules=2, num_branches=4, block_type='BASIC', num_blocks=(2, 2, 2, 2), num_channels=(18, 36, 72, 144), fuse_method='SUM', ), ), hrnet_w18=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(18, 36), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(18, 36, 72), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(18, 36, 72, 144), fuse_method='SUM', ), ), hrnet_w30=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(30, 60), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(30, 60, 120), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(30, 60, 120, 240), fuse_method='SUM', ), ), hrnet_w32=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(32, 64), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', 
num_blocks=(4, 4, 4), num_channels=(32, 64, 128), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256), fuse_method='SUM', ), ), hrnet_w40=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(40, 80), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(40, 80, 160), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(40, 80, 160, 320), fuse_method='SUM', ), ), hrnet_w44=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(44, 88), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(44, 88, 176), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(44, 88, 176, 352), fuse_method='SUM', ), ), hrnet_w48=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(48, 96), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(48, 96, 192), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(48, 96, 192, 384), fuse_method='SUM', ), ), hrnet_w64=dict( stem_width=64, stage1=dict( num_modules=1, num_branches=1, block_type='BOTTLENECK', num_blocks=(4,), num_channels=(64,), fuse_method='SUM', ), stage2=dict( num_modules=1, num_branches=2, block_type='BASIC', num_blocks=(4, 4), num_channels=(64, 128), fuse_method='SUM' ), stage3=dict( num_modules=4, num_branches=3, block_type='BASIC', num_blocks=(4, 4, 4), num_channels=(64, 128, 256), fuse_method='SUM' ), stage4=dict( num_modules=3, num_branches=4, block_type='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(64, 128, 256, 512), fuse_method='SUM', ), ) ) class HighResolutionModule(nn.Module): def __init__( self, num_branches, block_types, num_blocks, num_in_chs, num_channels, fuse_method, multi_scale_output=True, ): super(HighResolutionModule, self).__init__() self._check_branches( num_branches, block_types, num_blocks, num_in_chs, num_channels, ) self.num_in_chs = num_in_chs self.fuse_method = fuse_method self.num_branches = num_branches self.multi_scale_output = multi_scale_output self.branches = self._make_branches( num_branches, block_types, num_blocks, num_channels, ) self.fuse_layers = self._make_fuse_layers() self.fuse_act = nn.ReLU(False) def _check_branches(self, num_branches, block_types, num_blocks, num_in_chs, num_channels): error_msg = '' if num_branches != len(num_blocks): error_msg = 'num_branches({}) <> num_blocks({})'.format(num_branches, len(num_blocks)) elif num_branches != len(num_channels): error_msg = 'num_branches({}) <> num_channels({})'.format(num_branches, len(num_channels)) elif num_branches != len(num_in_chs): error_msg = 'num_branches({}) <> 
num_in_chs({})'.format(num_branches, len(num_in_chs)) if error_msg: _logger.error(error_msg) raise ValueError(error_msg) def _make_one_branch(self, branch_index, block_type, num_blocks, num_channels, stride=1): downsample = None if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block_type.expansion: downsample = nn.Sequential( nn.Conv2d( self.num_in_chs[branch_index], num_channels[branch_index] * block_type.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(num_channels[branch_index] * block_type.expansion, momentum=_BN_MOMENTUM), ) layers = [block_type(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)] self.num_in_chs[branch_index] = num_channels[branch_index] * block_type.expansion for i in range(1, num_blocks[branch_index]): layers.append(block_type(self.num_in_chs[branch_index], num_channels[branch_index])) return nn.Sequential(*layers) def _make_branches(self, num_branches, block_type, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append(self._make_one_branch(i, block_type, num_blocks, num_channels)) return nn.ModuleList(branches) def _make_fuse_layers(self): if self.num_branches == 1: return nn.Identity() num_branches = self.num_branches num_in_chs = self.num_in_chs fuse_layers = [] for i in range(num_branches if self.multi_scale_output else 1): fuse_layer = [] for j in range(num_branches): if j > i: fuse_layer.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False), nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM), nn.Upsample(scale_factor=2 ** (j - i), mode='nearest'))) elif j == i: fuse_layer.append(nn.Identity()) else: conv3x3s = [] for k in range(i - j): if k == i - j - 1: num_out_chs_conv3x3 = num_in_chs[i] conv3x3s.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM) )) else: num_out_chs_conv3x3 = num_in_chs[j] conv3x3s.append(nn.Sequential( nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False), nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM), nn.ReLU(False) )) fuse_layer.append(nn.Sequential(*conv3x3s)) fuse_layers.append(nn.ModuleList(fuse_layer)) return nn.ModuleList(fuse_layers) def get_num_in_chs(self): return self.num_in_chs def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]: if self.num_branches == 1: return [self.branches[0](x[0])] for i, branch in enumerate(self.branches): x[i] = branch(x[i]) x_fuse = [] for i, fuse_outer in enumerate(self.fuse_layers): y = None for j, f in enumerate(fuse_outer): if y is None: y = f(x[j]) else: y = y + f(x[j]) x_fuse.append(self.fuse_act(y)) return x_fuse class SequentialList(nn.Sequential): def __init__(self, *args): super(SequentialList, self).__init__(*args) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (List[torch.Tensor]) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (List[torch.Tensor]) pass def forward(self, x) -> List[torch.Tensor]: for module in self: x = module(x) return x @torch.jit.interface class ModuleInterface(torch.nn.Module): def forward(self, input: torch.Tensor) -> torch.Tensor: # `input` has a same name in Sequential forward pass block_types_dict = { 'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck } class HighResolutionNet(nn.Module): def __init__( self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, 
head='classification', **kwargs, ): super(HighResolutionNet, self).__init__() self.num_classes = num_classes assert output_stride == 32 # FIXME support dilation cfg.update(**kwargs) stem_width = cfg['stem_width'] self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM) self.act1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM) self.act2 = nn.ReLU(inplace=True) self.stage1_cfg = cfg['stage1'] num_channels = self.stage1_cfg['num_channels'][0] block_type = block_types_dict[self.stage1_cfg['block_type']] num_blocks = self.stage1_cfg['num_blocks'][0] self.layer1 = self._make_layer(block_type, 64, num_channels, num_blocks) stage1_out_channel = block_type.expansion * num_channels self.stage2_cfg = cfg['stage2'] num_channels = self.stage2_cfg['num_channels'] block_type = block_types_dict[self.stage2_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels) self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels) self.stage3_cfg = cfg['stage3'] num_channels = self.stage3_cfg['num_channels'] block_type = block_types_dict[self.stage3_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels) self.stage4_cfg = cfg['stage4'] num_channels = self.stage4_cfg['num_channels'] block_type = block_types_dict[self.stage4_cfg['block_type']] num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))] self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True) self.head = head self.head_channels = None # set if _make_head called head_conv_bias = cfg.pop('head_conv_bias', True) if head == 'classification': # Classification Head self.num_features = 2048 self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head( pre_stage_channels, conv_bias=head_conv_bias, ) self.global_pool, self.head_drop, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate, ) else: if head == 'incre': self.num_features = 2048 self.incre_modules, _, _ = self._make_head(pre_stage_channels, incre_only=True) else: self.num_features = 256 self.incre_modules = None self.global_pool = nn.Identity() self.head_drop = nn.Identity() self.classifier = nn.Identity() curr_stride = 2 # module names aren't actually valid here, hook or FeatureNet based extraction would not work self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')] for i, c in enumerate(self.head_channels if self.head_channels else num_channels): curr_stride *= 2 c = c * 4 if self.head_channels else c # head block_type expansion factor of 4 self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')] self.init_weights() def _make_head(self, pre_stage_channels, incre_only=False, conv_bias=True): head_block_type = Bottleneck self.head_channels = [32, 64, 128, 256] # Increasing the #channels on each resolution # from C, 2C, 4C, 8C to 128, 
256, 512, 1024 incre_modules = [] for i, channels in enumerate(pre_stage_channels): incre_modules.append(self._make_layer(head_block_type, channels, self.head_channels[i], 1, stride=1)) incre_modules = nn.ModuleList(incre_modules) if incre_only: return incre_modules, None, None # downsampling modules downsamp_modules = [] for i in range(len(pre_stage_channels) - 1): in_channels = self.head_channels[i] * head_block_type.expansion out_channels = self.head_channels[i + 1] * head_block_type.expansion downsamp_module = nn.Sequential( nn.Conv2d( in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=2, padding=1, bias=conv_bias), nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True) ) downsamp_modules.append(downsamp_module) downsamp_modules = nn.ModuleList(downsamp_modules) final_layer = nn.Sequential( nn.Conv2d( in_channels=self.head_channels[3] * head_block_type.expansion, out_channels=self.num_features, kernel_size=1, stride=1, padding=0, bias=conv_bias), nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True) ) return incre_modules, downsamp_modules, final_layer def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): num_branches_cur = len(num_channels_cur_layer) num_branches_pre = len(num_channels_pre_layer) transition_layers = [] for i in range(num_branches_cur): if i < num_branches_pre: if num_channels_cur_layer[i] != num_channels_pre_layer[i]: transition_layers.append(nn.Sequential( nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False), nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))) else: transition_layers.append(nn.Identity()) else: conv3x3s = [] for j in range(i + 1 - num_branches_pre): _in_chs = num_channels_pre_layer[-1] _out_chs = num_channels_cur_layer[i] if j == i - num_branches_pre else _in_chs conv3x3s.append(nn.Sequential( nn.Conv2d(_in_chs, _out_chs, 3, 2, 1, bias=False), nn.BatchNorm2d(_out_chs, momentum=_BN_MOMENTUM), nn.ReLU(inplace=True))) transition_layers.append(nn.Sequential(*conv3x3s)) return nn.ModuleList(transition_layers) def _make_layer(self, block_type, inplanes, planes, block_types, stride=1): downsample = None if stride != 1 or inplanes != planes * block_type.expansion: downsample = nn.Sequential( nn.Conv2d(inplanes, planes * block_type.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block_type.expansion, momentum=_BN_MOMENTUM), ) layers = [block_type(inplanes, planes, stride, downsample)] inplanes = planes * block_type.expansion for i in range(1, block_types): layers.append(block_type(inplanes, planes)) return nn.Sequential(*layers) def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True): num_modules = layer_config['num_modules'] num_branches = layer_config['num_branches'] num_blocks = layer_config['num_blocks'] num_channels = layer_config['num_channels'] block_type = block_types_dict[layer_config['block_type']] fuse_method = layer_config['fuse_method'] modules = [] for i in range(num_modules): # multi_scale_output is only used last module reset_multi_scale_output = multi_scale_output or i < num_modules - 1 modules.append(HighResolutionModule( num_branches, block_type, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output) ) num_in_chs = modules[-1].get_num_in_chs() return SequentialList(*modules), num_in_chs @torch.jit.ignore def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( 
m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^conv[12]|bn[12]', block_types=r'^(?:layer|stage|transition)(\d+)' if coarse else [ (r'^layer(\d+)\.(\d+)', None), (r'^stage(\d+)\.(\d+)', None), (r'^transition(\d+)', (99999,)), ], ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, "gradient checkpointing not supported" @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def stages(self, x) -> List[torch.Tensor]: x = self.layer1(x) xl = [t(x) for i, t in enumerate(self.transition1)] yl = self.stage2(xl) xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)] yl = self.stage3(xl) xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)] yl = self.stage4(xl) return yl def forward_features(self, x): # Stem x = self.conv1(x) x = self.bn1(x) x = self.act1(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) # Stages yl = self.stages(x) if self.incre_modules is None or self.downsamp_modules is None: return yl y = None for i, incre in enumerate(self.incre_modules): if y is None: y = incre(yl[i]) else: down: ModuleInterface = self.downsamp_modules[i - 1] # needed for torchscript module indexing y = incre(yl[i]) + down.forward(y) y = self.final_layer(y) return y def forward_head(self, x, pre_logits: bool = False): # Classification Head x = self.global_pool(x) x = self.head_drop(x) return x if pre_logits else self.classifier(x) def forward(self, x): y = self.forward_features(x) x = self.forward_head(y) return x class HighResolutionNetFeatures(HighResolutionNet): """HighResolutionNet feature extraction The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so. It would be more complicated to use the FeatureNet helpers. The `feature_location=incre` allows grabbing increased channel count features using part of the classification head. If `feature_location=''` the default HRNet features are returned. First stem conv is used for stride 2 features. 
""" def __init__( self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.0, feature_location='incre', out_indices=(0, 1, 2, 3, 4), **kwargs, ): assert feature_location in ('incre', '') super(HighResolutionNetFeatures, self).__init__( cfg, in_chans=in_chans, num_classes=num_classes, output_stride=output_stride, global_pool=global_pool, drop_rate=drop_rate, head=feature_location, **kwargs, ) self.feature_info = FeatureInfo(self.feature_info, out_indices) self._out_idx = {f['index'] for f in self.feature_info.get_dicts()} def forward_features(self, x): assert False, 'Not supported' def forward(self, x) -> List[torch.tensor]: out = [] x = self.conv1(x) x = self.bn1(x) x = self.act1(x) if 0 in self._out_idx: out.append(x) x = self.conv2(x) x = self.bn2(x) x = self.act2(x) x = self.stages(x) if self.incre_modules is not None: x = [incre(f) for f, incre in zip(x, self.incre_modules)] for i, f in enumerate(x): if i + 1 in self._out_idx: out.append(f) return out def _create_hrnet(variant, pretrained=False, cfg_variant=None, **model_kwargs): model_cls = HighResolutionNet features_only = False kwargs_filter = None if model_kwargs.pop('features_only', False): model_cls = HighResolutionNetFeatures kwargs_filter = ('num_classes', 'global_pool') features_only = True cfg_variant = cfg_variant or variant model = build_model_with_cfg( model_cls, variant, pretrained, model_cfg=cfg_cls[cfg_variant], pretrained_strict=not features_only, kwargs_filter=kwargs_filter, **model_kwargs, ) if features_only: model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg) model.default_cfg = model.pretrained_cfg # backwards compat return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'hrnet_w18_small.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18_small_v2.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18.ms_aug_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, ), 'hrnet_w18.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w30.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w32.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w40.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w44.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w48.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w64.ms_in1k': _cfg(hf_hub_id='timm/'), 'hrnet_w18_ssld.paddle_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) ), 'hrnet_w48_ssld.paddle_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288) ), }) @register_model def hrnet_w18_small(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18_small', pretrained, **kwargs) @register_model def hrnet_w18_small_v2(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs) @register_model def hrnet_w18(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w18', pretrained, **kwargs) @register_model def hrnet_w30(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w30', pretrained, **kwargs) @register_model def hrnet_w32(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w32', pretrained, **kwargs) @register_model def hrnet_w40(pretrained=False, **kwargs) -> HighResolutionNet: return 
_create_hrnet('hrnet_w40', pretrained, **kwargs) @register_model def hrnet_w44(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w44', pretrained, **kwargs) @register_model def hrnet_w48(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w48', pretrained, **kwargs) @register_model def hrnet_w64(pretrained=False, **kwargs) -> HighResolutionNet: return _create_hrnet('hrnet_w64', pretrained, **kwargs) @register_model def hrnet_w18_ssld(pretrained=False, **kwargs) -> HighResolutionNet: kwargs.setdefault('head_conv_bias', False) return _create_hrnet('hrnet_w18_ssld', cfg_variant='hrnet_w18', pretrained=pretrained, **kwargs) @register_model def hrnet_w48_ssld(pretrained=False, **kwargs) -> HighResolutionNet: kwargs.setdefault('head_conv_bias', False) return _create_hrnet('hrnet_w48_ssld', cfg_variant='hrnet_w48', pretrained=pretrained, **kwargs)
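# Usage sketch: the HRNet entrypoints support timm's features_only mode, which builds
# HighResolutionNetFeatures above and returns a list of feature maps (stem plus the
# four incre-stage outputs by default). The model name comes from this file; the
# 224x224 input size is an assumption.
import torch
import timm

backbone = timm.create_model('hrnet_w18_small', pretrained=False, features_only=True)
backbone.eval()
with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))
print(backbone.feature_info.channels())   # channels of each returned feature map
print(backbone.feature_info.reduction())  # stride of each feature map w.r.t. the input
for f in feats:
    print(tuple(f.shape))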
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/sequencer.py
""" Sequencer Paper: `Sequencer: Deep LSTM for Image Classification` - https://arxiv.org/pdf/2205.01972.pdf """ # Copyright (c) 2022. Yuki Tatsunami # Licensed under the Apache License, Version 2.0 (the "License"); import math from functools import partial from itertools import accumulate from typing import Tuple import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT from timm.layers import lecun_normal_, DropPath, Mlp, PatchEmbed, ClassifierHead from ._builder import build_model_with_cfg from ._manipulate import named_apply from ._registry import register_model, generate_default_cfgs __all__ = ['Sequencer2d'] # model_registry will add each entrypoint fn to this def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False): if isinstance(module, nn.Linear): if name.startswith('head'): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) else: if flax: # Flax defaults lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: if 'mlp' in name: nn.init.normal_(module.bias, std=1e-6) else: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)): stdv = 1.0 / math.sqrt(module.hidden_size) for weight in module.parameters(): nn.init.uniform_(weight, -stdv, stdv) elif hasattr(module, 'init_weights'): module.init_weights() class RNNIdentity(nn.Module): def __init__(self, *args, **kwargs): super(RNNIdentity, self).__init__() def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]: return x, None class RNN2dBase(nn.Module): def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, bidirectional: bool = True, union="cat", with_fc=True, ): super().__init__() self.input_size = input_size self.hidden_size = hidden_size self.output_size = 2 * hidden_size if bidirectional else hidden_size self.union = union self.with_vertical = True self.with_horizontal = True self.with_fc = with_fc self.fc = None if with_fc: if union == "cat": self.fc = nn.Linear(2 * self.output_size, input_size) elif union == "add": self.fc = nn.Linear(self.output_size, input_size) elif union == "vertical": self.fc = nn.Linear(self.output_size, input_size) self.with_horizontal = False elif union == "horizontal": self.fc = nn.Linear(self.output_size, input_size) self.with_vertical = False else: raise ValueError("Unrecognized union: " + union) elif union == "cat": pass if 2 * self.output_size != input_size: raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.") elif union == "add": pass if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") elif union == "vertical": if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") self.with_horizontal = False elif union == "horizontal": if self.output_size != input_size: raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.") self.with_vertical = False else: raise ValueError("Unrecognized union: " + union) 
self.rnn_v = RNNIdentity() self.rnn_h = RNNIdentity() def forward(self, x): B, H, W, C = x.shape if self.with_vertical: v = x.permute(0, 2, 1, 3) v = v.reshape(-1, H, C) v, _ = self.rnn_v(v) v = v.reshape(B, W, H, -1) v = v.permute(0, 2, 1, 3) else: v = None if self.with_horizontal: h = x.reshape(-1, W, C) h, _ = self.rnn_h(h) h = h.reshape(B, H, W, -1) else: h = None if v is not None and h is not None: if self.union == "cat": x = torch.cat([v, h], dim=-1) else: x = v + h elif v is not None: x = v elif h is not None: x = h if self.fc is not None: x = self.fc(x) return x class LSTM2d(RNN2dBase): def __init__( self, input_size: int, hidden_size: int, num_layers: int = 1, bias: bool = True, bidirectional: bool = True, union="cat", with_fc=True, ): super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc) if self.with_vertical: self.rnn_v = nn.LSTM( input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, ) if self.with_horizontal: self.rnn_h = nn.LSTM( input_size, hidden_size, num_layers, batch_first=True, bias=bias, bidirectional=bidirectional, ) class Sequencer2dBlock(nn.Module): def __init__( self, dim, hidden_size, mlp_ratio=3.0, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_layers=1, bidirectional=True, union="cat", with_fc=True, drop=0., drop_path=0., ): super().__init__() channels_dim = int(mlp_ratio * dim) self.norm1 = norm_layer(dim) self.rnn_tokens = rnn_layer( dim, hidden_size, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, ) self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop) def forward(self, x): x = x + self.drop_path(self.rnn_tokens(self.norm1(x))) x = x + self.drop_path(self.mlp_channels(self.norm2(x))) return x class Shuffle(nn.Module): def __init__(self): super().__init__() def forward(self, x): if self.training: B, H, W, C = x.shape r = torch.randperm(H * W) x = x.reshape(B, -1, C) x = x[:, r, :].reshape(B, H, W, -1) return x class Downsample2d(nn.Module): def __init__(self, input_dim, output_dim, patch_size): super().__init__() self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x): x = x.permute(0, 3, 1, 2) x = self.down(x) x = x.permute(0, 2, 3, 1) return x class Sequencer2dStage(nn.Module): def __init__( self, dim, dim_out, depth, patch_size, hidden_size, mlp_ratio, downsample=False, block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_layers=1, bidirectional=True, union="cat", with_fc=True, drop=0., drop_path=0., ): super().__init__() if downsample: self.downsample = Downsample2d(dim, dim_out, patch_size) else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] for block_idx in range(depth): blocks.append(block_layer( dim_out, hidden_size, mlp_ratio=mlp_ratio, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop, drop_path=drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path, )) self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) x = self.blocks(x) return x class Sequencer2d(nn.Module): def __init__( self, num_classes=1000, img_size=224, in_chans=3, 
global_pool='avg', layers=(4, 3, 8, 3), patch_sizes=(7, 2, 2, 1), embed_dims=(192, 384, 384, 384), hidden_sizes=(48, 96, 96, 96), mlp_ratios=(3.0, 3.0, 3.0, 3.0), block_layer=Sequencer2dBlock, rnn_layer=LSTM2d, mlp_layer=Mlp, norm_layer=partial(nn.LayerNorm, eps=1e-6), act_layer=nn.GELU, num_rnn_layers=1, bidirectional=True, union="cat", with_fc=True, drop_rate=0., drop_path_rate=0., nlhb=False, stem_norm=False, ): super().__init__() assert global_pool in ('', 'avg') self.num_classes = num_classes self.global_pool = global_pool self.num_features = embed_dims[-1] # num_features for consistency with other models self.feature_dim = -1 # channel dim index for feature outputs (rank 4, NHWC) self.output_fmt = 'NHWC' self.feature_info = [] self.stem = PatchEmbed( img_size=None, patch_size=patch_sizes[0], in_chans=in_chans, embed_dim=embed_dims[0], norm_layer=norm_layer if stem_norm else None, flatten=False, output_fmt='NHWC', ) assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios) reductions = list(accumulate(patch_sizes, lambda x, y: x * y)) stages = [] prev_dim = embed_dims[0] for i, _ in enumerate(embed_dims): stages += [Sequencer2dStage( prev_dim, embed_dims[i], depth=layers[i], downsample=i > 0, patch_size=patch_sizes[i], hidden_size=hidden_sizes[i], mlp_ratio=mlp_ratios[i], block_layer=block_layer, rnn_layer=rnn_layer, mlp_layer=mlp_layer, norm_layer=norm_layer, act_layer=act_layer, num_layers=num_rnn_layers, bidirectional=bidirectional, union=union, with_fc=with_fc, drop=drop_rate, drop_path=drop_path_rate, )] prev_dim = embed_dims[i] self.feature_info += [dict(num_chs=prev_dim, reduction=reductions[i], module=f'stages.{i}')] self.stages = nn.Sequential(*stages) self.norm = norm_layer(embed_dims[-1]) self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate, input_fmt=self.output_fmt, ) self.init_weights(nlhb=nlhb) def init_weights(self, nlhb=False): head_bias = -math.log(self.num_classes) if nlhb else 0. 
named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first @torch.jit.ignore def group_matcher(self, coarse=False): return dict( stem=r'^stem', blocks=[ (r'^stages\.(\d+)', None), (r'^norm', (99999,)) ] if coarse else [ (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^stages\.(\d+)\.downsample', (0,)), (r'^norm', (99999,)) ] ) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): assert not enable, 'gradient checkpointing not supported' @torch.jit.ignore def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=True) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): """ Remap original checkpoints -> timm """ if 'stages.0.blocks.0.norm1.weight' in state_dict: return state_dict # already translated checkpoint if 'model' in state_dict: state_dict = state_dict['model'] import re out_dict = {} for k, v in state_dict.items(): k = re.sub(r'blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k) k = re.sub(r'blocks.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k) k = k.replace('head.', 'head.fc.') out_dict[k] = v return out_dict def _create_sequencer2d(variant, pretrained=False, **kwargs): default_out_indices = tuple(range(3)) out_indices = kwargs.pop('out_indices', default_out_indices) model = build_model_with_cfg( Sequencer2d, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs, ) return model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.proj', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'sequencer2d_s.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_m.in1k': _cfg(hf_hub_id='timm/'), 'sequencer2d_l.in1k': _cfg(hf_hub_id='timm/'), }) @register_model def sequencer2d_s(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[4, 3, 8, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, ) model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_m(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[4, 3, 14, 3], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, **kwargs) model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def sequencer2d_l(pretrained=False, **kwargs) -> Sequencer2d: model_args = dict( layers=[8, 8, 16, 4], patch_sizes=[7, 2, 1, 1], embed_dims=[192, 384, 384, 384], hidden_sizes=[48, 96, 96, 96], mlp_ratios=[3.0, 3.0, 3.0, 3.0], rnn_layer=LSTM2d, bidirectional=True, union="cat", with_fc=True, **kwargs) 
model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **dict(model_args, **kwargs)) return model
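# Usage sketch: Sequencer2d replaces self-attention with bidirectional LSTMs run along
# the height and width axes (LSTM2d above), operating on NHWC feature maps. The snippet
# assumes the 'sequencer2d_s' entrypoint registered above; the 224x224 input matches
# the fixed_input_size default in _cfg.
import torch
import timm

model = timm.create_model('sequencer2d_s', pretrained=False).eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000])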
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/models/tresnet.py
""" TResNet: High Performance GPU-Dedicated Architecture https://arxiv.org/pdf/2003.13630.pdf Original model: https://github.com/mrT23/TResNet """ from collections import OrderedDict from functools import partial import torch import torch.nn as nn from timm.layers import SpaceToDepth, BlurPool2d, ClassifierHead, SEModule,\ ConvNormActAa, ConvNormAct, DropPath from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['TResNet'] # model_registry will add each entrypoint fn to this class BasicBlock(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None, drop_path_rate=0. ): super(BasicBlock, self).__init__() self.downsample = downsample self.stride = stride act_layer = partial(nn.LeakyReLU, negative_slope=1e-3) if stride == 1: self.conv1 = ConvNormAct(inplanes, planes, kernel_size=3, stride=1, act_layer=act_layer) else: self.conv1 = ConvNormActAa( inplanes, planes, kernel_size=3, stride=2, act_layer=act_layer, aa_layer=aa_layer) self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=1, apply_act=False, act_layer=None) self.act = nn.ReLU(inplace=True) rd_chs = max(planes * self.expansion // 4, 64) self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, use_se=True, act_layer=None, aa_layer=None, drop_path_rate=0., ): super(Bottleneck, self).__init__() self.downsample = downsample self.stride = stride act_layer = act_layer or partial(nn.LeakyReLU, negative_slope=1e-3) self.conv1 = ConvNormAct( inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer) if stride == 1: self.conv2 = ConvNormAct( planes, planes, kernel_size=3, stride=1, act_layer=act_layer) else: self.conv2 = ConvNormActAa( planes, planes, kernel_size=3, stride=2, act_layer=act_layer, aa_layer=aa_layer) reduction_chs = max(planes * self.expansion // 8, 64) self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None self.conv3 = ConvNormAct( planes, planes * self.expansion, kernel_size=1, stride=1, apply_act=False, act_layer=None) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.act = nn.ReLU(inplace=True) def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.conv3(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class TResNet(nn.Module): def __init__( self, layers, in_chans=3, num_classes=1000, width_factor=1.0, v2=False, global_pool='fast', drop_rate=0., drop_path_rate=0., ): self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False super(TResNet, self).__init__() aa_layer = BlurPool2d act_layer = nn.LeakyReLU # TResnet stages self.inplanes = int(64 * width_factor) self.planes = int(64 * width_factor) if v2: self.inplanes = self.inplanes // 8 * 8 self.planes = self.planes // 8 * 8 dpr = [x.tolist() for x in 
torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] conv1 = ConvNormAct(in_chans * 16, self.planes, stride=1, kernel_size=3, act_layer=act_layer) layer1 = self._make_layer( Bottleneck if v2 else BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[0]) layer2 = self._make_layer( Bottleneck if v2 else BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[1]) layer3 = self._make_layer( Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[2]) layer4 = self._make_layer( Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer, drop_path_rate=dpr[3]) # body self.body = nn.Sequential(OrderedDict([ ('s2d', SpaceToDepth()), ('conv1', conv1), ('layer1', layer1), ('layer2', layer2), ('layer3', layer3), ('layer4', layer4), ])) self.feature_info = [ dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'), dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'), dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), ] # head self.num_features = (self.planes * 8) * Bottleneck.expansion self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) # model initialization for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') if isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) # residual connections special initialization for m in self.modules(): if isinstance(m, BasicBlock): nn.init.zeros_(m.conv2.bn.weight) if isinstance(m, Bottleneck): nn.init.zeros_(m.conv3.bn.weight) def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None, drop_path_rate=0.): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: layers = [] if stride == 2: # avg pooling before 1x1 conv layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) layers += [ConvNormAct( self.inplanes, planes * block.expansion, kernel_size=1, stride=1, apply_act=False, act_layer=None)] downsample = nn.Sequential(*layers) layers = [] for i in range(blocks): layers.append(block( self.inplanes, planes, stride=stride if i == 0 else 1, downsample=downsample if i == 0 else None, use_se=use_se, aa_layer=aa_layer, drop_path_rate=drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate, )) self.inplanes = planes * block.expansion return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem=r'^body\.conv1', blocks=r'^body\.layer(\d+)' if coarse else r'^body\.layer(\d+)\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool=None): self.head.reset(num_classes, pool_type=global_pool) def forward_features(self, x): if self.grad_checkpointing and not torch.jit.is_scripting(): x = self.body.s2d(x) x = self.body.conv1(x) x = checkpoint_seq([ self.body.layer1, self.body.layer2, self.body.layer3, self.body.layer4], x, flatten=True) else: x = self.body(x) return x def 
forward_head(self, x, pre_logits: bool = False): return x if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'body.conv1.conv.weight' in state_dict: return state_dict import re state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) out_dict = {} for k, v in state_dict.items(): k = re.sub(r'conv(\d+)\.0.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = re.sub(r'conv(\d+)\.0.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub(r'conv(\d+)\.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = re.sub(r'conv(\d+)\.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub(r'downsample\.(\d+)\.0', lambda x: f'downsample.{int(x.group(1))}.conv', k) k = re.sub(r'downsample\.(\d+)\.1', lambda x: f'downsample.{int(x.group(1))}.bn', k) if k.endswith('bn.weight'): # convert weight from inplace_abn to batchnorm v = v.abs().add(1e-5) out_dict[k] = v return out_dict def _create_tresnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( TResNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': (0., 0., 0.), 'std': (1., 1., 1.), 'first_conv': 'body.conv1.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'tresnet_m.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), 'tresnet_m.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_l.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_xl.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_l.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_xl.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), }) @register_model def tresnet_m(pretrained=False, **kwargs) -> TResNet: model_kwargs = dict(layers=[3, 4, 11, 3], **kwargs) return _create_tresnet('tresnet_m', pretrained=pretrained, **model_kwargs) @register_model def tresnet_l(pretrained=False, **kwargs) -> TResNet: model_kwargs = dict(layers=[4, 5, 18, 3], width_factor=1.2, **kwargs) return _create_tresnet('tresnet_l', pretrained=pretrained, **model_kwargs) @register_model def tresnet_xl(pretrained=False, **kwargs) -> TResNet: model_kwargs = dict(layers=[4, 5, 24, 3], width_factor=1.3, **kwargs) return _create_tresnet('tresnet_xl', pretrained=pretrained, **model_kwargs) @register_model def tresnet_v2_l(pretrained=False, **kwargs) -> TResNet: model_kwargs = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True, **kwargs) return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **model_kwargs) register_model_deprecations(__name__, { 'tresnet_m_miil_in21k': 'tresnet_m.miil_in21k', 'tresnet_m_448': 'tresnet_m.miil_in1k_448', 'tresnet_l_448': 'tresnet_l.miil_in1k_448', 'tresnet_xl_448': 'tresnet_xl.miil_in1k_448', })
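# Usage sketch: TResNet starts with a SpaceToDepth stem, so conv1 receives
# in_chans * 16 channels from a 4x4 pixel rearrangement before the residual stages.
# The snippet assumes the 'tresnet_m' entrypoint registered above; the 224x224 input
# matches the default _cfg.
import torch
import timm

model = timm.create_model('tresnet_m', pretrained=False, num_classes=5).eval()
with torch.no_grad():
    out = model(torch.randn(2, 3, 224, 224))
print(out.shape)  # torch.Size([2, 5])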
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/layers/__init__.py
# NOTE timm.models.layers is DEPRECATED, please use timm.layers, this is here to reduce breakages in transition
from timm.layers.activations import *
from timm.layers.adaptive_avgmax_pool import \
    adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
from timm.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding
from timm.layers.blur_pool import BlurPool2d
from timm.layers.classifier import ClassifierHead, create_classifier
from timm.layers.cond_conv2d import CondConv2d, get_condconv_initializer
from timm.layers.config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
    set_layer_config
from timm.layers.conv2d_same import Conv2dSame, conv2d_same
from timm.layers.conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct
from timm.layers.create_act import create_act_layer, get_act_layer, get_act_fn
from timm.layers.create_attn import get_attn, create_attn
from timm.layers.create_conv2d import create_conv2d
from timm.layers.create_norm import get_norm_layer, create_norm_layer
from timm.layers.create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer
from timm.layers.drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from timm.layers.eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
from timm.layers.evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\
    EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a
from timm.layers.fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm
from timm.layers.filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d
from timm.layers.gather_excite import GatherExcite
from timm.layers.global_context import GlobalContext
from timm.layers.helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple
from timm.layers.inplace_abn import InplaceAbn
from timm.layers.linear import Linear
from timm.layers.mixed_conv2d import MixedConv2d
from timm.layers.mlp import Mlp, GluMlp, GatedMlp, ConvMlp
from timm.layers.non_local_attn import NonLocalAttn, BatNonLocalAttn
from timm.layers.norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d
from timm.layers.norm_act import BatchNormAct2d, GroupNormAct, convert_sync_batchnorm
from timm.layers.padding import get_padding, get_same_padding, pad_same
from timm.layers.patch_embed import PatchEmbed
from timm.layers.pool2d_same import AvgPool2dSame, create_pool2d
from timm.layers.squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
from timm.layers.selective_kernel import SelectiveKernel
from timm.layers.separable_conv import SeparableConv2d, SeparableConvNormAct
from timm.layers.space_to_depth import SpaceToDepthModule
from timm.layers.split_attn import SplitAttn
from timm.layers.split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
from timm.layers.std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
from timm.layers.test_time_pool import TestTimePoolHead, apply_test_time_pool
from timm.layers.trace_utils import _assert, _float_to_int
from timm.layers.weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_

import warnings
warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.layers", DeprecationWarning)
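# A small, hedged illustration (assumes a standard timm install): both import paths
# below resolve to the same class, but the deprecated one runs this shim and triggers
# the module-level warnings.warn above the first time it is imported in a process.
#
#     from timm.models.layers import ConvNormAct   # deprecated path, warns once
#     from timm.layers import ConvNormAct          # preferred path, no warning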
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/efficientnet_b2_pruned.txt
conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[54, 16, 1, 1]***blocks.1.0.bn1.weight:[54]***blocks.1.0.bn1.bias:[54]***blocks.1.0.bn1.running_mean:[54]***blocks.1.0.bn1.running_var:[54]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[54, 1, 3, 3]***blocks.1.0.bn2.weight:[54]***blocks.1.0.bn2.bias:[54]***blocks.1.0.bn2.running_mean:[54]***blocks.1.0.bn2.running_var:[54]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 54, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[54, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[54]***blocks.1.0.conv_pwl.weight:[17, 54, 1, 1]***blocks.1.0.bn3.weight:[17]***blocks.1.0.bn3.bias:[17]***blocks.1.0.bn3.running_mean:[17]***blocks.1.0.bn3.running_var:[17]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[69, 17, 1, 1]***blocks.1.1.bn1.weight:[69]***blocks.1.1.bn1.bias:[69]***blocks.1.1.bn1.running_mean:[69]***blocks.1.1.bn1.running_var:[69]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[69, 1, 3, 3]***blocks.1.1.bn2.weight:[69]***blocks.1.1.bn2.bias:[69]***blocks.1.1.bn2.running_mean:[69]***blocks.1.1.bn2.running_var:[69]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 69, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[69, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[69]***blocks.1.1.conv_pwl.weight:[17, 69, 1, 1]***blocks.1.1.bn3.weight:[17]***blocks.1.1.bn3.bias:[17]***blocks.1.1.bn3.running_mean:[17]***blocks.1.1.bn3.running_var:[17]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[61, 17, 1, 1]***blocks.1.2.bn1.weight:[61]***blocks.1.2.bn1.bias:[61]***blocks.1.2.bn1.running_mean:[61]***blocks.1.2.bn1.running_var:[61]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[61, 1, 3, 3]***blocks.1.2.bn2.weight:[61]***blocks.1.2.bn2.bias:[61]***blocks.1.2.bn2.running_mean:[61]***blocks.1.2.bn2.running_var:[61]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 61, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[61, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[61]***blocks.1.2.conv_pwl.weight:[17, 61, 1, 
1]***blocks.1.2.bn3.weight:[17]***blocks.1.2.bn3.bias:[17]***blocks.1.2.bn3.running_mean:[17]***blocks.1.2.bn3.running_var:[17]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[86, 17, 1, 1]***blocks.2.0.bn1.weight:[86]***blocks.2.0.bn1.bias:[86]***blocks.2.0.bn1.running_mean:[86]***blocks.2.0.bn1.running_var:[86]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[86, 1, 5, 5]***blocks.2.0.bn2.weight:[86]***blocks.2.0.bn2.bias:[86]***blocks.2.0.bn2.running_mean:[86]***blocks.2.0.bn2.running_var:[86]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 86, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[86, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[86]***blocks.2.0.conv_pwl.weight:[42, 86, 1, 1]***blocks.2.0.bn3.weight:[42]***blocks.2.0.bn3.bias:[42]***blocks.2.0.bn3.running_mean:[42]***blocks.2.0.bn3.running_var:[42]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[72, 42, 1, 1]***blocks.2.1.bn1.weight:[72]***blocks.2.1.bn1.bias:[72]***blocks.2.1.bn1.running_mean:[72]***blocks.2.1.bn1.running_var:[72]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[72, 1, 5, 5]***blocks.2.1.bn2.weight:[72]***blocks.2.1.bn2.bias:[72]***blocks.2.1.bn2.running_mean:[72]***blocks.2.1.bn2.running_var:[72]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 72, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[72, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[72]***blocks.2.1.conv_pwl.weight:[42, 72, 1, 1]***blocks.2.1.bn3.weight:[42]***blocks.2.1.bn3.bias:[42]***blocks.2.1.bn3.running_mean:[42]***blocks.2.1.bn3.running_var:[42]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[98, 42, 1, 1]***blocks.2.2.bn1.weight:[98]***blocks.2.2.bn1.bias:[98]***blocks.2.2.bn1.running_mean:[98]***blocks.2.2.bn1.running_var:[98]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[98, 1, 5, 5]***blocks.2.2.bn2.weight:[98]***blocks.2.2.bn2.bias:[98]***blocks.2.2.bn2.running_mean:[98]***blocks.2.2.bn2.running_var:[98]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 98, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[98, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[98]***blocks.2.2.conv_pwl.weight:[42, 98, 1, 1]***blocks.2.2.bn3.weight:[42]***blocks.2.2.bn3.bias:[42]***blocks.2.2.bn3.running_mean:[42]***blocks.2.2.bn3.running_var:[42]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[245, 42, 1, 1]***blocks.3.0.bn1.weight:[245]***blocks.3.0.bn1.bias:[245]***blocks.3.0.bn1.running_mean:[245]***blocks.3.0.bn1.running_var:[245]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[245, 1, 3, 3]***blocks.3.0.bn2.weight:[245]***blocks.3.0.bn2.bias:[245]***blocks.3.0.bn2.running_mean:[245]***blocks.3.0.bn2.running_var:[245]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 245, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[245, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[245]***blocks.3.0.conv_pwl.weight:[85, 245, 1, 1]***blocks.3.0.bn3.weight:[85]***blocks.3.0.bn3.bias:[85]***blocks.3.0.bn3.running_mean:[85]***blocks.3.0.bn3.running_var:[85]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[274, 85, 1, 
1]***blocks.3.1.bn1.weight:[274]***blocks.3.1.bn1.bias:[274]***blocks.3.1.bn1.running_mean:[274]***blocks.3.1.bn1.running_var:[274]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[274, 1, 3, 3]***blocks.3.1.bn2.weight:[274]***blocks.3.1.bn2.bias:[274]***blocks.3.1.bn2.running_mean:[274]***blocks.3.1.bn2.running_var:[274]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[22, 274, 1, 1]***blocks.3.1.se.conv_reduce.bias:[22]***blocks.3.1.se.conv_expand.weight:[274, 22, 1, 1]***blocks.3.1.se.conv_expand.bias:[274]***blocks.3.1.conv_pwl.weight:[85, 274, 1, 1]***blocks.3.1.bn3.weight:[85]***blocks.3.1.bn3.bias:[85]***blocks.3.1.bn3.running_mean:[85]***blocks.3.1.bn3.running_var:[85]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[254, 85, 1, 1]***blocks.3.2.bn1.weight:[254]***blocks.3.2.bn1.bias:[254]***blocks.3.2.bn1.running_mean:[254]***blocks.3.2.bn1.running_var:[254]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[254, 1, 3, 3]***blocks.3.2.bn2.weight:[254]***blocks.3.2.bn2.bias:[254]***blocks.3.2.bn2.running_mean:[254]***blocks.3.2.bn2.running_var:[254]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[22, 254, 1, 1]***blocks.3.2.se.conv_reduce.bias:[22]***blocks.3.2.se.conv_expand.weight:[254, 22, 1, 1]***blocks.3.2.se.conv_expand.bias:[254]***blocks.3.2.conv_pwl.weight:[85, 254, 1, 1]***blocks.3.2.bn3.weight:[85]***blocks.3.2.bn3.bias:[85]***blocks.3.2.bn3.running_mean:[85]***blocks.3.2.bn3.running_var:[85]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[292, 85, 1, 1]***blocks.3.3.bn1.weight:[292]***blocks.3.3.bn1.bias:[292]***blocks.3.3.bn1.running_mean:[292]***blocks.3.3.bn1.running_var:[292]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[292, 1, 3, 3]***blocks.3.3.bn2.weight:[292]***blocks.3.3.bn2.bias:[292]***blocks.3.3.bn2.running_mean:[292]***blocks.3.3.bn2.running_var:[292]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[22, 292, 1, 1]***blocks.3.3.se.conv_reduce.bias:[22]***blocks.3.3.se.conv_expand.weight:[292, 22, 1, 1]***blocks.3.3.se.conv_expand.bias:[292]***blocks.3.3.conv_pwl.weight:[85, 292, 1, 1]***blocks.3.3.bn3.weight:[85]***blocks.3.3.bn3.bias:[85]***blocks.3.3.bn3.running_mean:[85]***blocks.3.3.bn3.running_var:[85]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[502, 85, 1, 1]***blocks.4.0.bn1.weight:[502]***blocks.4.0.bn1.bias:[502]***blocks.4.0.bn1.running_mean:[502]***blocks.4.0.bn1.running_var:[502]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[502, 1, 5, 5]***blocks.4.0.bn2.weight:[502]***blocks.4.0.bn2.bias:[502]***blocks.4.0.bn2.running_mean:[502]***blocks.4.0.bn2.running_var:[502]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[22, 502, 1, 1]***blocks.4.0.se.conv_reduce.bias:[22]***blocks.4.0.se.conv_expand.weight:[502, 22, 1, 1]***blocks.4.0.se.conv_expand.bias:[502]***blocks.4.0.conv_pwl.weight:[116, 502, 1, 1]***blocks.4.0.bn3.weight:[116]***blocks.4.0.bn3.bias:[116]***blocks.4.0.bn3.running_mean:[116]***blocks.4.0.bn3.running_var:[116]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[315, 116, 1, 1]***blocks.4.1.bn1.weight:[315]***blocks.4.1.bn1.bias:[315]***blocks.4.1.bn1.running_mean:[315]***blocks.4.1.bn1.running_var:[315]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[315, 1, 5, 
5]***blocks.4.1.bn2.weight:[315]***blocks.4.1.bn2.bias:[315]***blocks.4.1.bn2.running_mean:[315]***blocks.4.1.bn2.running_var:[315]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[30, 315, 1, 1]***blocks.4.1.se.conv_reduce.bias:[30]***blocks.4.1.se.conv_expand.weight:[315, 30, 1, 1]***blocks.4.1.se.conv_expand.bias:[315]***blocks.4.1.conv_pwl.weight:[116, 315, 1, 1]***blocks.4.1.bn3.weight:[116]***blocks.4.1.bn3.bias:[116]***blocks.4.1.bn3.running_mean:[116]***blocks.4.1.bn3.running_var:[116]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[354, 116, 1, 1]***blocks.4.2.bn1.weight:[354]***blocks.4.2.bn1.bias:[354]***blocks.4.2.bn1.running_mean:[354]***blocks.4.2.bn1.running_var:[354]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[354, 1, 5, 5]***blocks.4.2.bn2.weight:[354]***blocks.4.2.bn2.bias:[354]***blocks.4.2.bn2.running_mean:[354]***blocks.4.2.bn2.running_var:[354]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[30, 354, 1, 1]***blocks.4.2.se.conv_reduce.bias:[30]***blocks.4.2.se.conv_expand.weight:[354, 30, 1, 1]***blocks.4.2.se.conv_expand.bias:[354]***blocks.4.2.conv_pwl.weight:[116, 354, 1, 1]***blocks.4.2.bn3.weight:[116]***blocks.4.2.bn3.bias:[116]***blocks.4.2.bn3.running_mean:[116]***blocks.4.2.bn3.running_var:[116]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[443, 116, 1, 1]***blocks.4.3.bn1.weight:[443]***blocks.4.3.bn1.bias:[443]***blocks.4.3.bn1.running_mean:[443]***blocks.4.3.bn1.running_var:[443]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[443, 1, 5, 5]***blocks.4.3.bn2.weight:[443]***blocks.4.3.bn2.bias:[443]***blocks.4.3.bn2.running_mean:[443]***blocks.4.3.bn2.running_var:[443]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[30, 443, 1, 1]***blocks.4.3.se.conv_reduce.bias:[30]***blocks.4.3.se.conv_expand.weight:[443, 30, 1, 1]***blocks.4.3.se.conv_expand.bias:[443]***blocks.4.3.conv_pwl.weight:[116, 443, 1, 1]***blocks.4.3.bn3.weight:[116]***blocks.4.3.bn3.bias:[116]***blocks.4.3.bn3.running_mean:[116]***blocks.4.3.bn3.running_var:[116]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[719, 116, 1, 1]***blocks.5.0.bn1.weight:[719]***blocks.5.0.bn1.bias:[719]***blocks.5.0.bn1.running_mean:[719]***blocks.5.0.bn1.running_var:[719]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[719, 1, 5, 5]***blocks.5.0.bn2.weight:[719]***blocks.5.0.bn2.bias:[719]***blocks.5.0.bn2.running_mean:[719]***blocks.5.0.bn2.running_var:[719]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[30, 719, 1, 1]***blocks.5.0.se.conv_reduce.bias:[30]***blocks.5.0.se.conv_expand.weight:[719, 30, 1, 1]***blocks.5.0.se.conv_expand.bias:[719]***blocks.5.0.conv_pwl.weight:[208, 719, 1, 1]***blocks.5.0.bn3.weight:[208]***blocks.5.0.bn3.bias:[208]***blocks.5.0.bn3.running_mean:[208]***blocks.5.0.bn3.running_var:[208]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1148, 208, 1, 1]***blocks.5.1.bn1.weight:[1148]***blocks.5.1.bn1.bias:[1148]***blocks.5.1.bn1.running_mean:[1148]***blocks.5.1.bn1.running_var:[1148]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1148, 1, 5, 5]***blocks.5.1.bn2.weight:[1148]***blocks.5.1.bn2.bias:[1148]***blocks.5.1.bn2.running_mean:[1148]***blocks.5.1.bn2.running_var:[1148]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[52, 1148, 1, 
1]***blocks.5.1.se.conv_reduce.bias:[52]***blocks.5.1.se.conv_expand.weight:[1148, 52, 1, 1]***blocks.5.1.se.conv_expand.bias:[1148]***blocks.5.1.conv_pwl.weight:[208, 1148, 1, 1]***blocks.5.1.bn3.weight:[208]***blocks.5.1.bn3.bias:[208]***blocks.5.1.bn3.running_mean:[208]***blocks.5.1.bn3.running_var:[208]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[1160, 208, 1, 1]***blocks.5.2.bn1.weight:[1160]***blocks.5.2.bn1.bias:[1160]***blocks.5.2.bn1.running_mean:[1160]***blocks.5.2.bn1.running_var:[1160]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[1160, 1, 5, 5]***blocks.5.2.bn2.weight:[1160]***blocks.5.2.bn2.bias:[1160]***blocks.5.2.bn2.running_mean:[1160]***blocks.5.2.bn2.running_var:[1160]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[52, 1160, 1, 1]***blocks.5.2.se.conv_reduce.bias:[52]***blocks.5.2.se.conv_expand.weight:[1160, 52, 1, 1]***blocks.5.2.se.conv_expand.bias:[1160]***blocks.5.2.conv_pwl.weight:[208, 1160, 1, 1]***blocks.5.2.bn3.weight:[208]***blocks.5.2.bn3.bias:[208]***blocks.5.2.bn3.running_mean:[208]***blocks.5.2.bn3.running_var:[208]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1182, 208, 1, 1]***blocks.5.3.bn1.weight:[1182]***blocks.5.3.bn1.bias:[1182]***blocks.5.3.bn1.running_mean:[1182]***blocks.5.3.bn1.running_var:[1182]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1182, 1, 5, 5]***blocks.5.3.bn2.weight:[1182]***blocks.5.3.bn2.bias:[1182]***blocks.5.3.bn2.running_mean:[1182]***blocks.5.3.bn2.running_var:[1182]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[52, 1182, 1, 1]***blocks.5.3.se.conv_reduce.bias:[52]***blocks.5.3.se.conv_expand.weight:[1182, 52, 1, 1]***blocks.5.3.se.conv_expand.bias:[1182]***blocks.5.3.conv_pwl.weight:[208, 1182, 1, 1]***blocks.5.3.bn3.weight:[208]***blocks.5.3.bn3.bias:[208]***blocks.5.3.bn3.running_mean:[208]***blocks.5.3.bn3.running_var:[208]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1228, 208, 1, 1]***blocks.5.4.bn1.weight:[1228]***blocks.5.4.bn1.bias:[1228]***blocks.5.4.bn1.running_mean:[1228]***blocks.5.4.bn1.running_var:[1228]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1228, 1, 5, 5]***blocks.5.4.bn2.weight:[1228]***blocks.5.4.bn2.bias:[1228]***blocks.5.4.bn2.running_mean:[1228]***blocks.5.4.bn2.running_var:[1228]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[52, 1228, 1, 1]***blocks.5.4.se.conv_reduce.bias:[52]***blocks.5.4.se.conv_expand.weight:[1228, 52, 1, 1]***blocks.5.4.se.conv_expand.bias:[1228]***blocks.5.4.conv_pwl.weight:[208, 1228, 1, 1]***blocks.5.4.bn3.weight:[208]***blocks.5.4.bn3.bias:[208]***blocks.5.4.bn3.running_mean:[208]***blocks.5.4.bn3.running_var:[208]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1248, 208, 1, 1]***blocks.6.0.bn1.weight:[1248]***blocks.6.0.bn1.bias:[1248]***blocks.6.0.bn1.running_mean:[1248]***blocks.6.0.bn1.running_var:[1248]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1248, 1, 3, 3]***blocks.6.0.bn2.weight:[1248]***blocks.6.0.bn2.bias:[1248]***blocks.6.0.bn2.running_mean:[1248]***blocks.6.0.bn2.running_var:[1248]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[52, 1248, 1, 1]***blocks.6.0.se.conv_reduce.bias:[52]***blocks.6.0.se.conv_expand.weight:[1248, 52, 1, 1]***blocks.6.0.se.conv_expand.bias:[1248]***blocks.6.0.conv_pwl.weight:[352, 1248, 1, 
1]***blocks.6.0.bn3.weight:[352]***blocks.6.0.bn3.bias:[352]***blocks.6.0.bn3.running_mean:[352]***blocks.6.0.bn3.running_var:[352]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2112, 352, 1, 1]***blocks.6.1.bn1.weight:[2112]***blocks.6.1.bn1.bias:[2112]***blocks.6.1.bn1.running_mean:[2112]***blocks.6.1.bn1.running_var:[2112]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2112, 1, 3, 3]***blocks.6.1.bn2.weight:[2112]***blocks.6.1.bn2.bias:[2112]***blocks.6.1.bn2.running_mean:[2112]***blocks.6.1.bn2.running_var:[2112]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[88, 2112, 1, 1]***blocks.6.1.se.conv_reduce.bias:[88]***blocks.6.1.se.conv_expand.weight:[2112, 88, 1, 1]***blocks.6.1.se.conv_expand.bias:[2112]***blocks.6.1.conv_pwl.weight:[352, 2112, 1, 1]***blocks.6.1.bn3.weight:[352]***blocks.6.1.bn3.bias:[352]***blocks.6.1.bn3.running_mean:[352]***blocks.6.1.bn3.running_var:[352]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1408, 352, 1, 1]***bn2.weight:[1408]***bn2.bias:[1408]***bn2.running_mean:[1408]***bn2.running_var:[1408]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1408]***classifier.bias:[1000]
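The _pruned listings above and below are flat records of pruned tensor shapes, one 'name:[dims]' entry per parameter separated by '***'; timm's pruned model variants use them to adapt the corresponding module shapes before loading pruned weights. A minimal parsing sketch follows (a hypothetical helper for illustration, not the loader timm itself ships):

def parse_pruned_txt(text: str) -> dict:
    """Parse a '***'-separated 'param_name:[d0, d1, ...]' listing into {name: shape tuple}."""
    shapes = {}
    for entry in text.strip().split('***'):
        name, _, dims = entry.partition(':')
        dims = dims.strip().lstrip('[').rstrip(']')
        shapes[name.strip()] = tuple(int(d) for d in dims.split(',') if d.strip())
    return shapes

# e.g. parse_pruned_txt(open('efficientnet_b2_pruned.txt').read())['conv_stem.weight']
# gives (32, 3, 3, 3), matching the first entry of the listing above.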
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/ecaresnet50d_pruned.txt
conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[47, 64, 1, 1]***layer1.0.bn1.weight:[47]***layer1.0.conv2.weight:[18, 47, 3, 3]***layer1.0.bn2.weight:[18]***layer1.0.conv3.weight:[19, 18, 1, 1]***layer1.0.bn3.weight:[19]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[19, 64, 1, 1]***layer1.0.downsample.2.weight:[19]***layer1.1.conv1.weight:[52, 19, 1, 1]***layer1.1.bn1.weight:[52]***layer1.1.conv2.weight:[22, 52, 3, 3]***layer1.1.bn2.weight:[22]***layer1.1.conv3.weight:[19, 22, 1, 1]***layer1.1.bn3.weight:[19]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[64, 19, 1, 1]***layer1.2.bn1.weight:[64]***layer1.2.conv2.weight:[35, 64, 3, 3]***layer1.2.bn2.weight:[35]***layer1.2.conv3.weight:[19, 35, 1, 1]***layer1.2.bn3.weight:[19]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[85, 19, 1, 1]***layer2.0.bn1.weight:[85]***layer2.0.conv2.weight:[37, 85, 3, 3]***layer2.0.bn2.weight:[37]***layer2.0.conv3.weight:[171, 37, 1, 1]***layer2.0.bn3.weight:[171]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[171, 19, 1, 1]***layer2.0.downsample.2.weight:[171]***layer2.1.conv1.weight:[107, 171, 1, 1]***layer2.1.bn1.weight:[107]***layer2.1.conv2.weight:[80, 107, 3, 3]***layer2.1.bn2.weight:[80]***layer2.1.conv3.weight:[171, 80, 1, 1]***layer2.1.bn3.weight:[171]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[120, 171, 1, 1]***layer2.2.bn1.weight:[120]***layer2.2.conv2.weight:[85, 120, 3, 3]***layer2.2.bn2.weight:[85]***layer2.2.conv3.weight:[171, 85, 1, 1]***layer2.2.bn3.weight:[171]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[125, 171, 1, 1]***layer2.3.bn1.weight:[125]***layer2.3.conv2.weight:[87, 125, 3, 3]***layer2.3.bn2.weight:[87]***layer2.3.conv3.weight:[171, 87, 1, 1]***layer2.3.bn3.weight:[171]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[198, 171, 1, 1]***layer3.0.bn1.weight:[198]***layer3.0.conv2.weight:[126, 198, 3, 3]***layer3.0.bn2.weight:[126]***layer3.0.conv3.weight:[818, 126, 1, 1]***layer3.0.bn3.weight:[818]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[818, 171, 1, 1]***layer3.0.downsample.2.weight:[818]***layer3.1.conv1.weight:[255, 818, 1, 1]***layer3.1.bn1.weight:[255]***layer3.1.conv2.weight:[232, 255, 3, 3]***layer3.1.bn2.weight:[232]***layer3.1.conv3.weight:[818, 232, 1, 1]***layer3.1.bn3.weight:[818]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[256, 818, 1, 1]***layer3.2.bn1.weight:[256]***layer3.2.conv2.weight:[233, 256, 3, 3]***layer3.2.bn2.weight:[233]***layer3.2.conv3.weight:[818, 233, 1, 1]***layer3.2.bn3.weight:[818]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[253, 818, 1, 1]***layer3.3.bn1.weight:[253]***layer3.3.conv2.weight:[235, 253, 3, 3]***layer3.3.bn2.weight:[235]***layer3.3.conv3.weight:[818, 235, 1, 1]***layer3.3.bn3.weight:[818]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[256, 818, 1, 1]***layer3.4.bn1.weight:[256]***layer3.4.conv2.weight:[225, 256, 3, 3]***layer3.4.bn2.weight:[225]***layer3.4.conv3.weight:[818, 225, 1, 1]***layer3.4.bn3.weight:[818]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[256, 818, 1, 1]***layer3.5.bn1.weight:[256]***layer3.5.conv2.weight:[239, 256, 3, 3]***layer3.5.bn2.weight:[239]***layer3.5.conv3.weight:[818, 239, 1, 1]***layer3.5.bn3.weight:[818]***layer3.5.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[492, 
818, 1, 1]***layer4.0.bn1.weight:[492]***layer4.0.conv2.weight:[237, 492, 3, 3]***layer4.0.bn2.weight:[237]***layer4.0.conv3.weight:[2022, 237, 1, 1]***layer4.0.bn3.weight:[2022]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2022, 818, 1, 1]***layer4.0.downsample.2.weight:[2022]***layer4.1.conv1.weight:[512, 2022, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[500, 512, 3, 3]***layer4.1.bn2.weight:[500]***layer4.1.conv3.weight:[2022, 500, 1, 1]***layer4.1.bn3.weight:[2022]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2022, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[490, 512, 3, 3]***layer4.2.bn2.weight:[490]***layer4.2.conv3.weight:[2022, 490, 1, 1]***layer4.2.bn3.weight:[2022]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2022]***layer1_2_conv3_M.weight:[256, 19]***layer2_3_conv3_M.weight:[512, 171]***layer3_5_conv3_M.weight:[1024, 818]***layer4_2_conv3_M.weight:[2048, 2022]
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/efficientnet_b1_pruned.txt
conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[48, 16, 1, 1]***blocks.1.0.bn1.weight:[48]***blocks.1.0.bn1.bias:[48]***blocks.1.0.bn1.running_mean:[48]***blocks.1.0.bn1.running_var:[48]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[48, 1, 3, 3]***blocks.1.0.bn2.weight:[48]***blocks.1.0.bn2.bias:[48]***blocks.1.0.bn2.running_mean:[48]***blocks.1.0.bn2.running_var:[48]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 48, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[48, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[48]***blocks.1.0.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[62, 12, 1, 1]***blocks.1.1.bn1.weight:[62]***blocks.1.1.bn1.bias:[62]***blocks.1.1.bn1.running_mean:[62]***blocks.1.1.bn1.running_var:[62]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[62, 1, 3, 3]***blocks.1.1.bn2.weight:[62]***blocks.1.1.bn2.bias:[62]***blocks.1.1.bn2.running_mean:[62]***blocks.1.1.bn2.running_var:[62]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 62, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[62, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[62]***blocks.1.1.conv_pwl.weight:[12, 62, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[48, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 
1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[70, 12, 1, 1]***blocks.2.0.bn1.weight:[70]***blocks.2.0.bn1.bias:[70]***blocks.2.0.bn1.running_mean:[70]***blocks.2.0.bn1.running_var:[70]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[70, 1, 5, 5]***blocks.2.0.bn2.weight:[70]***blocks.2.0.bn2.bias:[70]***blocks.2.0.bn2.running_mean:[70]***blocks.2.0.bn2.running_var:[70]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 70, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[70, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[70]***blocks.2.0.conv_pwl.weight:[35, 70, 1, 1]***blocks.2.0.bn3.weight:[35]***blocks.2.0.bn3.bias:[35]***blocks.2.0.bn3.running_mean:[35]***blocks.2.0.bn3.running_var:[35]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[61, 35, 1, 1]***blocks.2.1.bn1.weight:[61]***blocks.2.1.bn1.bias:[61]***blocks.2.1.bn1.running_mean:[61]***blocks.2.1.bn1.running_var:[61]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[61, 1, 5, 5]***blocks.2.1.bn2.weight:[61]***blocks.2.1.bn2.bias:[61]***blocks.2.1.bn2.running_mean:[61]***blocks.2.1.bn2.running_var:[61]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[10, 61, 1, 1]***blocks.2.1.se.conv_reduce.bias:[10]***blocks.2.1.se.conv_expand.weight:[61, 10, 1, 1]***blocks.2.1.se.conv_expand.bias:[61]***blocks.2.1.conv_pwl.weight:[35, 61, 1, 1]***blocks.2.1.bn3.weight:[35]***blocks.2.1.bn3.bias:[35]***blocks.2.1.bn3.running_mean:[35]***blocks.2.1.bn3.running_var:[35]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[51, 35, 1, 1]***blocks.2.2.bn1.weight:[51]***blocks.2.2.bn1.bias:[51]***blocks.2.2.bn1.running_mean:[51]***blocks.2.2.bn1.running_var:[51]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[51, 1, 5, 5]***blocks.2.2.bn2.weight:[51]***blocks.2.2.bn2.bias:[51]***blocks.2.2.bn2.running_mean:[51]***blocks.2.2.bn2.running_var:[51]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[10, 51, 1, 1]***blocks.2.2.se.conv_reduce.bias:[10]***blocks.2.2.se.conv_expand.weight:[51, 10, 1, 1]***blocks.2.2.se.conv_expand.bias:[51]***blocks.2.2.conv_pwl.weight:[35, 51, 1, 1]***blocks.2.2.bn3.weight:[35]***blocks.2.2.bn3.bias:[35]***blocks.2.2.bn3.running_mean:[35]***blocks.2.2.bn3.running_var:[35]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[175, 35, 1, 1]***blocks.3.0.bn1.weight:[175]***blocks.3.0.bn1.bias:[175]***blocks.3.0.bn1.running_mean:[175]***blocks.3.0.bn1.running_var:[175]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[175, 1, 3, 3]***blocks.3.0.bn2.weight:[175]***blocks.3.0.bn2.bias:[175]***blocks.3.0.bn2.running_mean:[175]***blocks.3.0.bn2.running_var:[175]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[10, 175, 1, 1]***blocks.3.0.se.conv_reduce.bias:[10]***blocks.3.0.se.conv_expand.weight:[175, 10, 1, 1]***blocks.3.0.se.conv_expand.bias:[175]***blocks.3.0.conv_pwl.weight:[74, 175, 1, 1]***blocks.3.0.bn3.weight:[74]***blocks.3.0.bn3.bias:[74]***blocks.3.0.bn3.running_mean:[74]***blocks.3.0.bn3.running_var:[74]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[188, 74, 1, 
1]***blocks.3.1.bn1.weight:[188]***blocks.3.1.bn1.bias:[188]***blocks.3.1.bn1.running_mean:[188]***blocks.3.1.bn1.running_var:[188]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[188, 1, 3, 3]***blocks.3.1.bn2.weight:[188]***blocks.3.1.bn2.bias:[188]***blocks.3.1.bn2.running_mean:[188]***blocks.3.1.bn2.running_var:[188]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[20, 188, 1, 1]***blocks.3.1.se.conv_reduce.bias:[20]***blocks.3.1.se.conv_expand.weight:[188, 20, 1, 1]***blocks.3.1.se.conv_expand.bias:[188]***blocks.3.1.conv_pwl.weight:[74, 188, 1, 1]***blocks.3.1.bn3.weight:[74]***blocks.3.1.bn3.bias:[74]***blocks.3.1.bn3.running_mean:[74]***blocks.3.1.bn3.running_var:[74]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[137, 74, 1, 1]***blocks.3.2.bn1.weight:[137]***blocks.3.2.bn1.bias:[137]***blocks.3.2.bn1.running_mean:[137]***blocks.3.2.bn1.running_var:[137]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[137, 1, 3, 3]***blocks.3.2.bn2.weight:[137]***blocks.3.2.bn2.bias:[137]***blocks.3.2.bn2.running_mean:[137]***blocks.3.2.bn2.running_var:[137]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[20, 137, 1, 1]***blocks.3.2.se.conv_reduce.bias:[20]***blocks.3.2.se.conv_expand.weight:[137, 20, 1, 1]***blocks.3.2.se.conv_expand.bias:[137]***blocks.3.2.conv_pwl.weight:[74, 137, 1, 1]***blocks.3.2.bn3.weight:[74]***blocks.3.2.bn3.bias:[74]***blocks.3.2.bn3.running_mean:[74]***blocks.3.2.bn3.running_var:[74]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[164, 74, 1, 1]***blocks.3.3.bn1.weight:[164]***blocks.3.3.bn1.bias:[164]***blocks.3.3.bn1.running_mean:[164]***blocks.3.3.bn1.running_var:[164]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[164, 1, 3, 3]***blocks.3.3.bn2.weight:[164]***blocks.3.3.bn2.bias:[164]***blocks.3.3.bn2.running_mean:[164]***blocks.3.3.bn2.running_var:[164]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[20, 164, 1, 1]***blocks.3.3.se.conv_reduce.bias:[20]***blocks.3.3.se.conv_expand.weight:[164, 20, 1, 1]***blocks.3.3.se.conv_expand.bias:[164]***blocks.3.3.conv_pwl.weight:[74, 164, 1, 1]***blocks.3.3.bn3.weight:[74]***blocks.3.3.bn3.bias:[74]***blocks.3.3.bn3.running_mean:[74]***blocks.3.3.bn3.running_var:[74]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[399, 74, 1, 1]***blocks.4.0.bn1.weight:[399]***blocks.4.0.bn1.bias:[399]***blocks.4.0.bn1.running_mean:[399]***blocks.4.0.bn1.running_var:[399]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[399, 1, 5, 5]***blocks.4.0.bn2.weight:[399]***blocks.4.0.bn2.bias:[399]***blocks.4.0.bn2.running_mean:[399]***blocks.4.0.bn2.running_var:[399]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[20, 399, 1, 1]***blocks.4.0.se.conv_reduce.bias:[20]***blocks.4.0.se.conv_expand.weight:[399, 20, 1, 1]***blocks.4.0.se.conv_expand.bias:[399]***blocks.4.0.conv_pwl.weight:[67, 399, 1, 1]***blocks.4.0.bn3.weight:[67]***blocks.4.0.bn3.bias:[67]***blocks.4.0.bn3.running_mean:[67]***blocks.4.0.bn3.running_var:[67]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[201, 67, 1, 1]***blocks.4.1.bn1.weight:[201]***blocks.4.1.bn1.bias:[201]***blocks.4.1.bn1.running_mean:[201]***blocks.4.1.bn1.running_var:[201]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[201, 1, 5, 
5]***blocks.4.1.bn2.weight:[201]***blocks.4.1.bn2.bias:[201]***blocks.4.1.bn2.running_mean:[201]***blocks.4.1.bn2.running_var:[201]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[28, 201, 1, 1]***blocks.4.1.se.conv_reduce.bias:[28]***blocks.4.1.se.conv_expand.weight:[201, 28, 1, 1]***blocks.4.1.se.conv_expand.bias:[201]***blocks.4.1.conv_pwl.weight:[67, 201, 1, 1]***blocks.4.1.bn3.weight:[67]***blocks.4.1.bn3.bias:[67]***blocks.4.1.bn3.running_mean:[67]***blocks.4.1.bn3.running_var:[67]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[160, 67, 1, 1]***blocks.4.2.bn1.weight:[160]***blocks.4.2.bn1.bias:[160]***blocks.4.2.bn1.running_mean:[160]***blocks.4.2.bn1.running_var:[160]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[160, 1, 5, 5]***blocks.4.2.bn2.weight:[160]***blocks.4.2.bn2.bias:[160]***blocks.4.2.bn2.running_mean:[160]***blocks.4.2.bn2.running_var:[160]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[28, 160, 1, 1]***blocks.4.2.se.conv_reduce.bias:[28]***blocks.4.2.se.conv_expand.weight:[160, 28, 1, 1]***blocks.4.2.se.conv_expand.bias:[160]***blocks.4.2.conv_pwl.weight:[67, 160, 1, 1]***blocks.4.2.bn3.weight:[67]***blocks.4.2.bn3.bias:[67]***blocks.4.2.bn3.running_mean:[67]***blocks.4.2.bn3.running_var:[67]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[213, 67, 1, 1]***blocks.4.3.bn1.weight:[213]***blocks.4.3.bn1.bias:[213]***blocks.4.3.bn1.running_mean:[213]***blocks.4.3.bn1.running_var:[213]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[213, 1, 5, 5]***blocks.4.3.bn2.weight:[213]***blocks.4.3.bn2.bias:[213]***blocks.4.3.bn2.running_mean:[213]***blocks.4.3.bn2.running_var:[213]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[28, 213, 1, 1]***blocks.4.3.se.conv_reduce.bias:[28]***blocks.4.3.se.conv_expand.weight:[213, 28, 1, 1]***blocks.4.3.se.conv_expand.bias:[213]***blocks.4.3.conv_pwl.weight:[67, 213, 1, 1]***blocks.4.3.bn3.weight:[67]***blocks.4.3.bn3.bias:[67]***blocks.4.3.bn3.running_mean:[67]***blocks.4.3.bn3.running_var:[67]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[637, 67, 1, 1]***blocks.5.0.bn1.weight:[637]***blocks.5.0.bn1.bias:[637]***blocks.5.0.bn1.running_mean:[637]***blocks.5.0.bn1.running_var:[637]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[637, 1, 5, 5]***blocks.5.0.bn2.weight:[637]***blocks.5.0.bn2.bias:[637]***blocks.5.0.bn2.running_mean:[637]***blocks.5.0.bn2.running_var:[637]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[27, 637, 1, 1]***blocks.5.0.se.conv_reduce.bias:[27]***blocks.5.0.se.conv_expand.weight:[637, 27, 1, 1]***blocks.5.0.se.conv_expand.bias:[637]***blocks.5.0.conv_pwl.weight:[192, 637, 1, 1]***blocks.5.0.bn3.weight:[192]***blocks.5.0.bn3.bias:[192]***blocks.5.0.bn3.running_mean:[192]***blocks.5.0.bn3.running_var:[192]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[806, 192, 1, 1]***blocks.5.1.bn1.weight:[806]***blocks.5.1.bn1.bias:[806]***blocks.5.1.bn1.running_mean:[806]***blocks.5.1.bn1.running_var:[806]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[806, 1, 5, 5]***blocks.5.1.bn2.weight:[806]***blocks.5.1.bn2.bias:[806]***blocks.5.1.bn2.running_mean:[806]***blocks.5.1.bn2.running_var:[806]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[48, 806, 1, 
1]***blocks.5.1.se.conv_reduce.bias:[48]***blocks.5.1.se.conv_expand.weight:[806, 48, 1, 1]***blocks.5.1.se.conv_expand.bias:[806]***blocks.5.1.conv_pwl.weight:[192, 806, 1, 1]***blocks.5.1.bn3.weight:[192]***blocks.5.1.bn3.bias:[192]***blocks.5.1.bn3.running_mean:[192]***blocks.5.1.bn3.running_var:[192]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[798, 192, 1, 1]***blocks.5.2.bn1.weight:[798]***blocks.5.2.bn1.bias:[798]***blocks.5.2.bn1.running_mean:[798]***blocks.5.2.bn1.running_var:[798]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[798, 1, 5, 5]***blocks.5.2.bn2.weight:[798]***blocks.5.2.bn2.bias:[798]***blocks.5.2.bn2.running_mean:[798]***blocks.5.2.bn2.running_var:[798]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[48, 798, 1, 1]***blocks.5.2.se.conv_reduce.bias:[48]***blocks.5.2.se.conv_expand.weight:[798, 48, 1, 1]***blocks.5.2.se.conv_expand.bias:[798]***blocks.5.2.conv_pwl.weight:[192, 798, 1, 1]***blocks.5.2.bn3.weight:[192]***blocks.5.2.bn3.bias:[192]***blocks.5.2.bn3.running_mean:[192]***blocks.5.2.bn3.running_var:[192]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[891, 192, 1, 1]***blocks.5.3.bn1.weight:[891]***blocks.5.3.bn1.bias:[891]***blocks.5.3.bn1.running_mean:[891]***blocks.5.3.bn1.running_var:[891]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[891, 1, 5, 5]***blocks.5.3.bn2.weight:[891]***blocks.5.3.bn2.bias:[891]***blocks.5.3.bn2.running_mean:[891]***blocks.5.3.bn2.running_var:[891]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[48, 891, 1, 1]***blocks.5.3.se.conv_reduce.bias:[48]***blocks.5.3.se.conv_expand.weight:[891, 48, 1, 1]***blocks.5.3.se.conv_expand.bias:[891]***blocks.5.3.conv_pwl.weight:[192, 891, 1, 1]***blocks.5.3.bn3.weight:[192]***blocks.5.3.bn3.bias:[192]***blocks.5.3.bn3.running_mean:[192]***blocks.5.3.bn3.running_var:[192]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[990, 192, 1, 1]***blocks.5.4.bn1.weight:[990]***blocks.5.4.bn1.bias:[990]***blocks.5.4.bn1.running_mean:[990]***blocks.5.4.bn1.running_var:[990]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[990, 1, 5, 5]***blocks.5.4.bn2.weight:[990]***blocks.5.4.bn2.bias:[990]***blocks.5.4.bn2.running_mean:[990]***blocks.5.4.bn2.running_var:[990]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[48, 990, 1, 1]***blocks.5.4.se.conv_reduce.bias:[48]***blocks.5.4.se.conv_expand.weight:[990, 48, 1, 1]***blocks.5.4.se.conv_expand.bias:[990]***blocks.5.4.conv_pwl.weight:[192, 990, 1, 1]***blocks.5.4.bn3.weight:[192]***blocks.5.4.bn3.bias:[192]***blocks.5.4.bn3.running_mean:[192]***blocks.5.4.bn3.running_var:[192]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1152, 192, 1, 1]***blocks.6.0.bn1.weight:[1152]***blocks.6.0.bn1.bias:[1152]***blocks.6.0.bn1.running_mean:[1152]***blocks.6.0.bn1.running_var:[1152]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1152, 1, 3, 3]***blocks.6.0.bn2.weight:[1152]***blocks.6.0.bn2.bias:[1152]***blocks.6.0.bn2.running_mean:[1152]***blocks.6.0.bn2.running_var:[1152]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[48, 1152, 1, 1]***blocks.6.0.se.conv_reduce.bias:[48]***blocks.6.0.se.conv_expand.weight:[1152, 48, 1, 1]***blocks.6.0.se.conv_expand.bias:[1152]***blocks.6.0.conv_pwl.weight:[320, 1152, 1, 
1]***blocks.6.0.bn3.weight:[320]***blocks.6.0.bn3.bias:[320]***blocks.6.0.bn3.running_mean:[320]***blocks.6.0.bn3.running_var:[320]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[1912, 320, 1, 1]***blocks.6.1.bn1.weight:[1912]***blocks.6.1.bn1.bias:[1912]***blocks.6.1.bn1.running_mean:[1912]***blocks.6.1.bn1.running_var:[1912]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[1912, 1, 3, 3]***blocks.6.1.bn2.weight:[1912]***blocks.6.1.bn2.bias:[1912]***blocks.6.1.bn2.running_mean:[1912]***blocks.6.1.bn2.running_var:[1912]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[80, 1912, 1, 1]***blocks.6.1.se.conv_reduce.bias:[80]***blocks.6.1.se.conv_expand.weight:[1912, 80, 1, 1]***blocks.6.1.se.conv_expand.bias:[1912]***blocks.6.1.conv_pwl.weight:[320, 1912, 1, 1]***blocks.6.1.bn3.weight:[320]***blocks.6.1.bn3.bias:[320]***blocks.6.1.bn3.running_mean:[320]***blocks.6.1.bn3.running_var:[320]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1280, 320, 1, 1]***bn2.weight:[1280]***bn2.bias:[1280]***bn2.running_mean:[1280]***bn2.running_var:[1280]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1280]***classifier.bias:[1000]
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/ecaresnet101d_pruned.txt
conv1.0.weight:[32, 3, 3, 3]***conv1.1.weight:[32]***conv1.3.weight:[32, 32, 3, 3]***conv1.4.weight:[32]***conv1.6.weight:[64, 32, 3, 3]***bn1.weight:[64]***layer1.0.conv1.weight:[45, 64, 1, 1]***layer1.0.bn1.weight:[45]***layer1.0.conv2.weight:[25, 45, 3, 3]***layer1.0.bn2.weight:[25]***layer1.0.conv3.weight:[26, 25, 1, 1]***layer1.0.bn3.weight:[26]***layer1.0.se.conv.weight:[1, 1, 5]***layer1.0.downsample.1.weight:[26, 64, 1, 1]***layer1.0.downsample.2.weight:[26]***layer1.1.conv1.weight:[53, 26, 1, 1]***layer1.1.bn1.weight:[53]***layer1.1.conv2.weight:[20, 53, 3, 3]***layer1.1.bn2.weight:[20]***layer1.1.conv3.weight:[26, 20, 1, 1]***layer1.1.bn3.weight:[26]***layer1.1.se.conv.weight:[1, 1, 5]***layer1.2.conv1.weight:[60, 26, 1, 1]***layer1.2.bn1.weight:[60]***layer1.2.conv2.weight:[27, 60, 3, 3]***layer1.2.bn2.weight:[27]***layer1.2.conv3.weight:[26, 27, 1, 1]***layer1.2.bn3.weight:[26]***layer1.2.se.conv.weight:[1, 1, 5]***layer2.0.conv1.weight:[81, 26, 1, 1]***layer2.0.bn1.weight:[81]***layer2.0.conv2.weight:[24, 81, 3, 3]***layer2.0.bn2.weight:[24]***layer2.0.conv3.weight:[142, 24, 1, 1]***layer2.0.bn3.weight:[142]***layer2.0.se.conv.weight:[1, 1, 5]***layer2.0.downsample.1.weight:[142, 26, 1, 1]***layer2.0.downsample.2.weight:[142]***layer2.1.conv1.weight:[93, 142, 1, 1]***layer2.1.bn1.weight:[93]***layer2.1.conv2.weight:[49, 93, 3, 3]***layer2.1.bn2.weight:[49]***layer2.1.conv3.weight:[142, 49, 1, 1]***layer2.1.bn3.weight:[142]***layer2.1.se.conv.weight:[1, 1, 5]***layer2.2.conv1.weight:[102, 142, 1, 1]***layer2.2.bn1.weight:[102]***layer2.2.conv2.weight:[54, 102, 3, 3]***layer2.2.bn2.weight:[54]***layer2.2.conv3.weight:[142, 54, 1, 1]***layer2.2.bn3.weight:[142]***layer2.2.se.conv.weight:[1, 1, 5]***layer2.3.conv1.weight:[122, 142, 1, 1]***layer2.3.bn1.weight:[122]***layer2.3.conv2.weight:[78, 122, 3, 3]***layer2.3.bn2.weight:[78]***layer2.3.conv3.weight:[142, 78, 1, 1]***layer2.3.bn3.weight:[142]***layer2.3.se.conv.weight:[1, 1, 5]***layer3.0.conv1.weight:[101, 142, 1, 1]***layer3.0.bn1.weight:[101]***layer3.0.conv2.weight:[25, 101, 3, 3]***layer3.0.bn2.weight:[25]***layer3.0.conv3.weight:[278, 25, 1, 1]***layer3.0.bn3.weight:[278]***layer3.0.se.conv.weight:[1, 1, 5]***layer3.0.downsample.1.weight:[278, 142, 1, 1]***layer3.0.downsample.2.weight:[278]***layer3.1.conv1.weight:[239, 278, 1, 1]***layer3.1.bn1.weight:[239]***layer3.1.conv2.weight:[160, 239, 3, 3]***layer3.1.bn2.weight:[160]***layer3.1.conv3.weight:[278, 160, 1, 1]***layer3.1.bn3.weight:[278]***layer3.1.se.conv.weight:[1, 1, 5]***layer3.2.conv1.weight:[234, 278, 1, 1]***layer3.2.bn1.weight:[234]***layer3.2.conv2.weight:[156, 234, 3, 3]***layer3.2.bn2.weight:[156]***layer3.2.conv3.weight:[278, 156, 1, 1]***layer3.2.bn3.weight:[278]***layer3.2.se.conv.weight:[1, 1, 5]***layer3.3.conv1.weight:[250, 278, 1, 1]***layer3.3.bn1.weight:[250]***layer3.3.conv2.weight:[176, 250, 3, 3]***layer3.3.bn2.weight:[176]***layer3.3.conv3.weight:[278, 176, 1, 1]***layer3.3.bn3.weight:[278]***layer3.3.se.conv.weight:[1, 1, 5]***layer3.4.conv1.weight:[253, 278, 1, 1]***layer3.4.bn1.weight:[253]***layer3.4.conv2.weight:[191, 253, 3, 3]***layer3.4.bn2.weight:[191]***layer3.4.conv3.weight:[278, 191, 1, 1]***layer3.4.bn3.weight:[278]***layer3.4.se.conv.weight:[1, 1, 5]***layer3.5.conv1.weight:[251, 278, 1, 1]***layer3.5.bn1.weight:[251]***layer3.5.conv2.weight:[175, 251, 3, 3]***layer3.5.bn2.weight:[175]***layer3.5.conv3.weight:[278, 175, 1, 1]***layer3.5.bn3.weight:[278]***layer3.5.se.conv.weight:[1, 1, 5]***layer3.6.conv1.weight:[230, 278, 1, 
1]***layer3.6.bn1.weight:[230]***layer3.6.conv2.weight:[128, 230, 3, 3]***layer3.6.bn2.weight:[128]***layer3.6.conv3.weight:[278, 128, 1, 1]***layer3.6.bn3.weight:[278]***layer3.6.se.conv.weight:[1, 1, 5]***layer3.7.conv1.weight:[244, 278, 1, 1]***layer3.7.bn1.weight:[244]***layer3.7.conv2.weight:[154, 244, 3, 3]***layer3.7.bn2.weight:[154]***layer3.7.conv3.weight:[278, 154, 1, 1]***layer3.7.bn3.weight:[278]***layer3.7.se.conv.weight:[1, 1, 5]***layer3.8.conv1.weight:[244, 278, 1, 1]***layer3.8.bn1.weight:[244]***layer3.8.conv2.weight:[159, 244, 3, 3]***layer3.8.bn2.weight:[159]***layer3.8.conv3.weight:[278, 159, 1, 1]***layer3.8.bn3.weight:[278]***layer3.8.se.conv.weight:[1, 1, 5]***layer3.9.conv1.weight:[238, 278, 1, 1]***layer3.9.bn1.weight:[238]***layer3.9.conv2.weight:[97, 238, 3, 3]***layer3.9.bn2.weight:[97]***layer3.9.conv3.weight:[278, 97, 1, 1]***layer3.9.bn3.weight:[278]***layer3.9.se.conv.weight:[1, 1, 5]***layer3.10.conv1.weight:[244, 278, 1, 1]***layer3.10.bn1.weight:[244]***layer3.10.conv2.weight:[149, 244, 3, 3]***layer3.10.bn2.weight:[149]***layer3.10.conv3.weight:[278, 149, 1, 1]***layer3.10.bn3.weight:[278]***layer3.10.se.conv.weight:[1, 1, 5]***layer3.11.conv1.weight:[253, 278, 1, 1]***layer3.11.bn1.weight:[253]***layer3.11.conv2.weight:[181, 253, 3, 3]***layer3.11.bn2.weight:[181]***layer3.11.conv3.weight:[278, 181, 1, 1]***layer3.11.bn3.weight:[278]***layer3.11.se.conv.weight:[1, 1, 5]***layer3.12.conv1.weight:[245, 278, 1, 1]***layer3.12.bn1.weight:[245]***layer3.12.conv2.weight:[119, 245, 3, 3]***layer3.12.bn2.weight:[119]***layer3.12.conv3.weight:[278, 119, 1, 1]***layer3.12.bn3.weight:[278]***layer3.12.se.conv.weight:[1, 1, 5]***layer3.13.conv1.weight:[255, 278, 1, 1]***layer3.13.bn1.weight:[255]***layer3.13.conv2.weight:[216, 255, 3, 3]***layer3.13.bn2.weight:[216]***layer3.13.conv3.weight:[278, 216, 1, 1]***layer3.13.bn3.weight:[278]***layer3.13.se.conv.weight:[1, 1, 5]***layer3.14.conv1.weight:[256, 278, 1, 1]***layer3.14.bn1.weight:[256]***layer3.14.conv2.weight:[201, 256, 3, 3]***layer3.14.bn2.weight:[201]***layer3.14.conv3.weight:[278, 201, 1, 1]***layer3.14.bn3.weight:[278]***layer3.14.se.conv.weight:[1, 1, 5]***layer3.15.conv1.weight:[253, 278, 1, 1]***layer3.15.bn1.weight:[253]***layer3.15.conv2.weight:[149, 253, 3, 3]***layer3.15.bn2.weight:[149]***layer3.15.conv3.weight:[278, 149, 1, 1]***layer3.15.bn3.weight:[278]***layer3.15.se.conv.weight:[1, 1, 5]***layer3.16.conv1.weight:[254, 278, 1, 1]***layer3.16.bn1.weight:[254]***layer3.16.conv2.weight:[141, 254, 3, 3]***layer3.16.bn2.weight:[141]***layer3.16.conv3.weight:[278, 141, 1, 1]***layer3.16.bn3.weight:[278]***layer3.16.se.conv.weight:[1, 1, 5]***layer3.17.conv1.weight:[256, 278, 1, 1]***layer3.17.bn1.weight:[256]***layer3.17.conv2.weight:[190, 256, 3, 3]***layer3.17.bn2.weight:[190]***layer3.17.conv3.weight:[278, 190, 1, 1]***layer3.17.bn3.weight:[278]***layer3.17.se.conv.weight:[1, 1, 5]***layer3.18.conv1.weight:[256, 278, 1, 1]***layer3.18.bn1.weight:[256]***layer3.18.conv2.weight:[217, 256, 3, 3]***layer3.18.bn2.weight:[217]***layer3.18.conv3.weight:[278, 217, 1, 1]***layer3.18.bn3.weight:[278]***layer3.18.se.conv.weight:[1, 1, 5]***layer3.19.conv1.weight:[255, 278, 1, 1]***layer3.19.bn1.weight:[255]***layer3.19.conv2.weight:[156, 255, 3, 3]***layer3.19.bn2.weight:[156]***layer3.19.conv3.weight:[278, 156, 1, 1]***layer3.19.bn3.weight:[278]***layer3.19.se.conv.weight:[1, 1, 5]***layer3.20.conv1.weight:[256, 278, 1, 1]***layer3.20.bn1.weight:[256]***layer3.20.conv2.weight:[155, 256, 3, 
3]***layer3.20.bn2.weight:[155]***layer3.20.conv3.weight:[278, 155, 1, 1]***layer3.20.bn3.weight:[278]***layer3.20.se.conv.weight:[1, 1, 5]***layer3.21.conv1.weight:[256, 278, 1, 1]***layer3.21.bn1.weight:[256]***layer3.21.conv2.weight:[232, 256, 3, 3]***layer3.21.bn2.weight:[232]***layer3.21.conv3.weight:[278, 232, 1, 1]***layer3.21.bn3.weight:[278]***layer3.21.se.conv.weight:[1, 1, 5]***layer3.22.conv1.weight:[256, 278, 1, 1]***layer3.22.bn1.weight:[256]***layer3.22.conv2.weight:[214, 256, 3, 3]***layer3.22.bn2.weight:[214]***layer3.22.conv3.weight:[278, 214, 1, 1]***layer3.22.bn3.weight:[278]***layer3.22.se.conv.weight:[1, 1, 5]***layer4.0.conv1.weight:[499, 278, 1, 1]***layer4.0.bn1.weight:[499]***layer4.0.conv2.weight:[289, 499, 3, 3]***layer4.0.bn2.weight:[289]***layer4.0.conv3.weight:[2042, 289, 1, 1]***layer4.0.bn3.weight:[2042]***layer4.0.se.conv.weight:[1, 1, 7]***layer4.0.downsample.1.weight:[2042, 278, 1, 1]***layer4.0.downsample.2.weight:[2042]***layer4.1.conv1.weight:[512, 2042, 1, 1]***layer4.1.bn1.weight:[512]***layer4.1.conv2.weight:[512, 512, 3, 3]***layer4.1.bn2.weight:[512]***layer4.1.conv3.weight:[2042, 512, 1, 1]***layer4.1.bn3.weight:[2042]***layer4.1.se.conv.weight:[1, 1, 7]***layer4.2.conv1.weight:[512, 2042, 1, 1]***layer4.2.bn1.weight:[512]***layer4.2.conv2.weight:[502, 512, 3, 3]***layer4.2.bn2.weight:[502]***layer4.2.conv3.weight:[2042, 502, 1, 1]***layer4.2.bn3.weight:[2042]***layer4.2.se.conv.weight:[1, 1, 7]***fc.weight:[1000, 2042]***layer1_2_conv3_M.weight:[256, 26]***layer2_3_conv3_M.weight:[512, 142]***layer3_22_conv3_M.weight:[1024, 278]***layer4_2_conv3_M.weight:[2048, 2042]
0
hf_public_repos/pytorch-image-models/timm/models
hf_public_repos/pytorch-image-models/timm/models/_pruned/efficientnet_b3_pruned.txt
conv_stem.weight:[40, 3, 3, 3]***bn1.weight:[40]***bn1.bias:[40]***bn1.running_mean:[40]***bn1.running_var:[40]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[40, 1, 3, 3]***blocks.0.0.bn1.weight:[40]***blocks.0.0.bn1.bias:[40]***blocks.0.0.bn1.running_mean:[40]***blocks.0.0.bn1.running_var:[40]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[10, 40, 1, 1]***blocks.0.0.se.conv_reduce.bias:[10]***blocks.0.0.se.conv_expand.weight:[40, 10, 1, 1]***blocks.0.0.se.conv_expand.bias:[40]***blocks.0.0.conv_pw.weight:[24, 40, 1, 1]***blocks.0.0.bn2.weight:[24]***blocks.0.0.bn2.bias:[24]***blocks.0.0.bn2.running_mean:[24]***blocks.0.0.bn2.running_var:[24]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[24, 1, 3, 3]***blocks.0.1.bn1.weight:[24]***blocks.0.1.bn1.bias:[24]***blocks.0.1.bn1.running_mean:[24]***blocks.0.1.bn1.running_var:[24]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[6, 24, 1, 1]***blocks.0.1.se.conv_reduce.bias:[6]***blocks.0.1.se.conv_expand.weight:[24, 6, 1, 1]***blocks.0.1.se.conv_expand.bias:[24]***blocks.0.1.conv_pw.weight:[24, 24, 1, 1]***blocks.0.1.bn2.weight:[24]***blocks.0.1.bn2.bias:[24]***blocks.0.1.bn2.running_mean:[24]***blocks.0.1.bn2.running_var:[24]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[27, 24, 1, 1]***blocks.1.0.bn1.weight:[27]***blocks.1.0.bn1.bias:[27]***blocks.1.0.bn1.running_mean:[27]***blocks.1.0.bn1.running_var:[27]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[27, 1, 3, 3]***blocks.1.0.bn2.weight:[27]***blocks.1.0.bn2.bias:[27]***blocks.1.0.bn2.running_mean:[27]***blocks.1.0.bn2.running_var:[27]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[6, 27, 1, 1]***blocks.1.0.se.conv_reduce.bias:[6]***blocks.1.0.se.conv_expand.weight:[27, 6, 1, 1]***blocks.1.0.se.conv_expand.bias:[27]***blocks.1.0.conv_pwl.weight:[12, 27, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[49, 12, 1, 1]***blocks.1.1.bn1.weight:[49]***blocks.1.1.bn1.bias:[49]***blocks.1.1.bn1.running_mean:[49]***blocks.1.1.bn1.running_var:[49]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[49, 1, 3, 3]***blocks.1.1.bn2.weight:[49]***blocks.1.1.bn2.bias:[49]***blocks.1.1.bn2.running_mean:[49]***blocks.1.1.bn2.running_var:[49]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[8, 49, 1, 1]***blocks.1.1.se.conv_reduce.bias:[8]***blocks.1.1.se.conv_expand.weight:[49, 8, 1, 1]***blocks.1.1.se.conv_expand.bias:[49]***blocks.1.1.conv_pwl.weight:[12, 49, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[8, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[8]***blocks.1.2.se.conv_expand.weight:[48, 8, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 
1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[83, 12, 1, 1]***blocks.2.0.bn1.weight:[83]***blocks.2.0.bn1.bias:[83]***blocks.2.0.bn1.running_mean:[83]***blocks.2.0.bn1.running_var:[83]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[83, 1, 5, 5]***blocks.2.0.bn2.weight:[83]***blocks.2.0.bn2.bias:[83]***blocks.2.0.bn2.running_mean:[83]***blocks.2.0.bn2.running_var:[83]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[8, 83, 1, 1]***blocks.2.0.se.conv_reduce.bias:[8]***blocks.2.0.se.conv_expand.weight:[83, 8, 1, 1]***blocks.2.0.se.conv_expand.bias:[83]***blocks.2.0.conv_pwl.weight:[40, 83, 1, 1]***blocks.2.0.bn3.weight:[40]***blocks.2.0.bn3.bias:[40]***blocks.2.0.bn3.running_mean:[40]***blocks.2.0.bn3.running_var:[40]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[90, 40, 1, 1]***blocks.2.1.bn1.weight:[90]***blocks.2.1.bn1.bias:[90]***blocks.2.1.bn1.running_mean:[90]***blocks.2.1.bn1.running_var:[90]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[90, 1, 5, 5]***blocks.2.1.bn2.weight:[90]***blocks.2.1.bn2.bias:[90]***blocks.2.1.bn2.running_mean:[90]***blocks.2.1.bn2.running_var:[90]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[12, 90, 1, 1]***blocks.2.1.se.conv_reduce.bias:[12]***blocks.2.1.se.conv_expand.weight:[90, 12, 1, 1]***blocks.2.1.se.conv_expand.bias:[90]***blocks.2.1.conv_pwl.weight:[40, 90, 1, 1]***blocks.2.1.bn3.weight:[40]***blocks.2.1.bn3.bias:[40]***blocks.2.1.bn3.running_mean:[40]***blocks.2.1.bn3.running_var:[40]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[85, 40, 1, 1]***blocks.2.2.bn1.weight:[85]***blocks.2.2.bn1.bias:[85]***blocks.2.2.bn1.running_mean:[85]***blocks.2.2.bn1.running_var:[85]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[85, 1, 5, 5]***blocks.2.2.bn2.weight:[85]***blocks.2.2.bn2.bias:[85]***blocks.2.2.bn2.running_mean:[85]***blocks.2.2.bn2.running_var:[85]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[12, 85, 1, 1]***blocks.2.2.se.conv_reduce.bias:[12]***blocks.2.2.se.conv_expand.weight:[85, 12, 1, 1]***blocks.2.2.se.conv_expand.bias:[85]***blocks.2.2.conv_pwl.weight:[40, 85, 1, 1]***blocks.2.2.bn3.weight:[40]***blocks.2.2.bn3.bias:[40]***blocks.2.2.bn3.running_mean:[40]***blocks.2.2.bn3.running_var:[40]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[215, 40, 1, 1]***blocks.3.0.bn1.weight:[215]***blocks.3.0.bn1.bias:[215]***blocks.3.0.bn1.running_mean:[215]***blocks.3.0.bn1.running_var:[215]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[215, 1, 3, 3]***blocks.3.0.bn2.weight:[215]***blocks.3.0.bn2.bias:[215]***blocks.3.0.bn2.running_mean:[215]***blocks.3.0.bn2.running_var:[215]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[12, 215, 1, 1]***blocks.3.0.se.conv_reduce.bias:[12]***blocks.3.0.se.conv_expand.weight:[215, 12, 1, 1]***blocks.3.0.se.conv_expand.bias:[215]***blocks.3.0.conv_pwl.weight:[93, 215, 1, 1]***blocks.3.0.bn3.weight:[93]***blocks.3.0.bn3.bias:[93]***blocks.3.0.bn3.running_mean:[93]***blocks.3.0.bn3.running_var:[93]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[261, 93, 1, 
1]***blocks.3.1.bn1.weight:[261]***blocks.3.1.bn1.bias:[261]***blocks.3.1.bn1.running_mean:[261]***blocks.3.1.bn1.running_var:[261]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[261, 1, 3, 3]***blocks.3.1.bn2.weight:[261]***blocks.3.1.bn2.bias:[261]***blocks.3.1.bn2.running_mean:[261]***blocks.3.1.bn2.running_var:[261]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[24, 261, 1, 1]***blocks.3.1.se.conv_reduce.bias:[24]***blocks.3.1.se.conv_expand.weight:[261, 24, 1, 1]***blocks.3.1.se.conv_expand.bias:[261]***blocks.3.1.conv_pwl.weight:[93, 261, 1, 1]***blocks.3.1.bn3.weight:[93]***blocks.3.1.bn3.bias:[93]***blocks.3.1.bn3.running_mean:[93]***blocks.3.1.bn3.running_var:[93]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[219, 93, 1, 1]***blocks.3.2.bn1.weight:[219]***blocks.3.2.bn1.bias:[219]***blocks.3.2.bn1.running_mean:[219]***blocks.3.2.bn1.running_var:[219]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[219, 1, 3, 3]***blocks.3.2.bn2.weight:[219]***blocks.3.2.bn2.bias:[219]***blocks.3.2.bn2.running_mean:[219]***blocks.3.2.bn2.running_var:[219]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[24, 219, 1, 1]***blocks.3.2.se.conv_reduce.bias:[24]***blocks.3.2.se.conv_expand.weight:[219, 24, 1, 1]***blocks.3.2.se.conv_expand.bias:[219]***blocks.3.2.conv_pwl.weight:[93, 219, 1, 1]***blocks.3.2.bn3.weight:[93]***blocks.3.2.bn3.bias:[93]***blocks.3.2.bn3.running_mean:[93]***blocks.3.2.bn3.running_var:[93]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[254, 93, 1, 1]***blocks.3.3.bn1.weight:[254]***blocks.3.3.bn1.bias:[254]***blocks.3.3.bn1.running_mean:[254]***blocks.3.3.bn1.running_var:[254]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[254, 1, 3, 3]***blocks.3.3.bn2.weight:[254]***blocks.3.3.bn2.bias:[254]***blocks.3.3.bn2.running_mean:[254]***blocks.3.3.bn2.running_var:[254]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[24, 254, 1, 1]***blocks.3.3.se.conv_reduce.bias:[24]***blocks.3.3.se.conv_expand.weight:[254, 24, 1, 1]***blocks.3.3.se.conv_expand.bias:[254]***blocks.3.3.conv_pwl.weight:[93, 254, 1, 1]***blocks.3.3.bn3.weight:[93]***blocks.3.3.bn3.bias:[93]***blocks.3.3.bn3.running_mean:[93]***blocks.3.3.bn3.running_var:[93]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.3.4.conv_pw.weight:[236, 93, 1, 1]***blocks.3.4.bn1.weight:[236]***blocks.3.4.bn1.bias:[236]***blocks.3.4.bn1.running_mean:[236]***blocks.3.4.bn1.running_var:[236]***blocks.3.4.bn1.num_batches_tracked:[]***blocks.3.4.conv_dw.weight:[236, 1, 3, 3]***blocks.3.4.bn2.weight:[236]***blocks.3.4.bn2.bias:[236]***blocks.3.4.bn2.running_mean:[236]***blocks.3.4.bn2.running_var:[236]***blocks.3.4.bn2.num_batches_tracked:[]***blocks.3.4.se.conv_reduce.weight:[24, 236, 1, 1]***blocks.3.4.se.conv_reduce.bias:[24]***blocks.3.4.se.conv_expand.weight:[236, 24, 1, 1]***blocks.3.4.se.conv_expand.bias:[236]***blocks.3.4.conv_pwl.weight:[93, 236, 1, 1]***blocks.3.4.bn3.weight:[93]***blocks.3.4.bn3.bias:[93]***blocks.3.4.bn3.running_mean:[93]***blocks.3.4.bn3.running_var:[93]***blocks.3.4.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[480, 93, 1, 1]***blocks.4.0.bn1.weight:[480]***blocks.4.0.bn1.bias:[480]***blocks.4.0.bn1.running_mean:[480]***blocks.4.0.bn1.running_var:[480]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[480, 1, 5, 
5]***blocks.4.0.bn2.weight:[480]***blocks.4.0.bn2.bias:[480]***blocks.4.0.bn2.running_mean:[480]***blocks.4.0.bn2.running_var:[480]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[24, 480, 1, 1]***blocks.4.0.se.conv_reduce.bias:[24]***blocks.4.0.se.conv_expand.weight:[480, 24, 1, 1]***blocks.4.0.se.conv_expand.bias:[480]***blocks.4.0.conv_pwl.weight:[120, 480, 1, 1]***blocks.4.0.bn3.weight:[120]***blocks.4.0.bn3.bias:[120]***blocks.4.0.bn3.running_mean:[120]***blocks.4.0.bn3.running_var:[120]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[235, 120, 1, 1]***blocks.4.1.bn1.weight:[235]***blocks.4.1.bn1.bias:[235]***blocks.4.1.bn1.running_mean:[235]***blocks.4.1.bn1.running_var:[235]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[235, 1, 5, 5]***blocks.4.1.bn2.weight:[235]***blocks.4.1.bn2.bias:[235]***blocks.4.1.bn2.running_mean:[235]***blocks.4.1.bn2.running_var:[235]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[34, 235, 1, 1]***blocks.4.1.se.conv_reduce.bias:[34]***blocks.4.1.se.conv_expand.weight:[235, 34, 1, 1]***blocks.4.1.se.conv_expand.bias:[235]***blocks.4.1.conv_pwl.weight:[120, 235, 1, 1]***blocks.4.1.bn3.weight:[120]***blocks.4.1.bn3.bias:[120]***blocks.4.1.bn3.running_mean:[120]***blocks.4.1.bn3.running_var:[120]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[217, 120, 1, 1]***blocks.4.2.bn1.weight:[217]***blocks.4.2.bn1.bias:[217]***blocks.4.2.bn1.running_mean:[217]***blocks.4.2.bn1.running_var:[217]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[217, 1, 5, 5]***blocks.4.2.bn2.weight:[217]***blocks.4.2.bn2.bias:[217]***blocks.4.2.bn2.running_mean:[217]***blocks.4.2.bn2.running_var:[217]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[34, 217, 1, 1]***blocks.4.2.se.conv_reduce.bias:[34]***blocks.4.2.se.conv_expand.weight:[217, 34, 1, 1]***blocks.4.2.se.conv_expand.bias:[217]***blocks.4.2.conv_pwl.weight:[120, 217, 1, 1]***blocks.4.2.bn3.weight:[120]***blocks.4.2.bn3.bias:[120]***blocks.4.2.bn3.running_mean:[120]***blocks.4.2.bn3.running_var:[120]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[226, 120, 1, 1]***blocks.4.3.bn1.weight:[226]***blocks.4.3.bn1.bias:[226]***blocks.4.3.bn1.running_mean:[226]***blocks.4.3.bn1.running_var:[226]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[226, 1, 5, 5]***blocks.4.3.bn2.weight:[226]***blocks.4.3.bn2.bias:[226]***blocks.4.3.bn2.running_mean:[226]***blocks.4.3.bn2.running_var:[226]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[33, 226, 1, 1]***blocks.4.3.se.conv_reduce.bias:[33]***blocks.4.3.se.conv_expand.weight:[226, 33, 1, 1]***blocks.4.3.se.conv_expand.bias:[226]***blocks.4.3.conv_pwl.weight:[120, 226, 1, 1]***blocks.4.3.bn3.weight:[120]***blocks.4.3.bn3.bias:[120]***blocks.4.3.bn3.running_mean:[120]***blocks.4.3.bn3.running_var:[120]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.4.4.conv_pw.weight:[340, 120, 1, 1]***blocks.4.4.bn1.weight:[340]***blocks.4.4.bn1.bias:[340]***blocks.4.4.bn1.running_mean:[340]***blocks.4.4.bn1.running_var:[340]***blocks.4.4.bn1.num_batches_tracked:[]***blocks.4.4.conv_dw.weight:[340, 1, 5, 5]***blocks.4.4.bn2.weight:[340]***blocks.4.4.bn2.bias:[340]***blocks.4.4.bn2.running_mean:[340]***blocks.4.4.bn2.running_var:[340]***blocks.4.4.bn2.num_batches_tracked:[]***blocks.4.4.se.conv_reduce.weight:[34, 340, 1, 
1]***blocks.4.4.se.conv_reduce.bias:[34]***blocks.4.4.se.conv_expand.weight:[340, 34, 1, 1]***blocks.4.4.se.conv_expand.bias:[340]***blocks.4.4.conv_pwl.weight:[120, 340, 1, 1]***blocks.4.4.bn3.weight:[120]***blocks.4.4.bn3.bias:[120]***blocks.4.4.bn3.running_mean:[120]***blocks.4.4.bn3.running_var:[120]***blocks.4.4.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[802, 120, 1, 1]***blocks.5.0.bn1.weight:[802]***blocks.5.0.bn1.bias:[802]***blocks.5.0.bn1.running_mean:[802]***blocks.5.0.bn1.running_var:[802]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[802, 1, 5, 5]***blocks.5.0.bn2.weight:[802]***blocks.5.0.bn2.bias:[802]***blocks.5.0.bn2.running_mean:[802]***blocks.5.0.bn2.running_var:[802]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[34, 802, 1, 1]***blocks.5.0.se.conv_reduce.bias:[34]***blocks.5.0.se.conv_expand.weight:[802, 34, 1, 1]***blocks.5.0.se.conv_expand.bias:[802]***blocks.5.0.conv_pwl.weight:[232, 802, 1, 1]***blocks.5.0.bn3.weight:[232]***blocks.5.0.bn3.bias:[232]***blocks.5.0.bn3.running_mean:[232]***blocks.5.0.bn3.running_var:[232]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[1030, 232, 1, 1]***blocks.5.1.bn1.weight:[1030]***blocks.5.1.bn1.bias:[1030]***blocks.5.1.bn1.running_mean:[1030]***blocks.5.1.bn1.running_var:[1030]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[1030, 1, 5, 5]***blocks.5.1.bn2.weight:[1030]***blocks.5.1.bn2.bias:[1030]***blocks.5.1.bn2.running_mean:[1030]***blocks.5.1.bn2.running_var:[1030]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[58, 1030, 1, 1]***blocks.5.1.se.conv_reduce.bias:[58]***blocks.5.1.se.conv_expand.weight:[1030, 58, 1, 1]***blocks.5.1.se.conv_expand.bias:[1030]***blocks.5.1.conv_pwl.weight:[232, 1030, 1, 1]***blocks.5.1.bn3.weight:[232]***blocks.5.1.bn3.bias:[232]***blocks.5.1.bn3.running_mean:[232]***blocks.5.1.bn3.running_var:[232]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[924, 232, 1, 1]***blocks.5.2.bn1.weight:[924]***blocks.5.2.bn1.bias:[924]***blocks.5.2.bn1.running_mean:[924]***blocks.5.2.bn1.running_var:[924]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[924, 1, 5, 5]***blocks.5.2.bn2.weight:[924]***blocks.5.2.bn2.bias:[924]***blocks.5.2.bn2.running_mean:[924]***blocks.5.2.bn2.running_var:[924]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[58, 924, 1, 1]***blocks.5.2.se.conv_reduce.bias:[58]***blocks.5.2.se.conv_expand.weight:[924, 58, 1, 1]***blocks.5.2.se.conv_expand.bias:[924]***blocks.5.2.conv_pwl.weight:[232, 924, 1, 1]***blocks.5.2.bn3.weight:[232]***blocks.5.2.bn3.bias:[232]***blocks.5.2.bn3.running_mean:[232]***blocks.5.2.bn3.running_var:[232]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[1016, 232, 1, 1]***blocks.5.3.bn1.weight:[1016]***blocks.5.3.bn1.bias:[1016]***blocks.5.3.bn1.running_mean:[1016]***blocks.5.3.bn1.running_var:[1016]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[1016, 1, 5, 5]***blocks.5.3.bn2.weight:[1016]***blocks.5.3.bn2.bias:[1016]***blocks.5.3.bn2.running_mean:[1016]***blocks.5.3.bn2.running_var:[1016]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[58, 1016, 1, 1]***blocks.5.3.se.conv_reduce.bias:[58]***blocks.5.3.se.conv_expand.weight:[1016, 58, 1, 1]***blocks.5.3.se.conv_expand.bias:[1016]***blocks.5.3.conv_pwl.weight:[232, 1016, 1, 
1]***blocks.5.3.bn3.weight:[232]***blocks.5.3.bn3.bias:[232]***blocks.5.3.bn3.running_mean:[232]***blocks.5.3.bn3.running_var:[232]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[1130, 232, 1, 1]***blocks.5.4.bn1.weight:[1130]***blocks.5.4.bn1.bias:[1130]***blocks.5.4.bn1.running_mean:[1130]***blocks.5.4.bn1.running_var:[1130]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[1130, 1, 5, 5]***blocks.5.4.bn2.weight:[1130]***blocks.5.4.bn2.bias:[1130]***blocks.5.4.bn2.running_mean:[1130]***blocks.5.4.bn2.running_var:[1130]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[58, 1130, 1, 1]***blocks.5.4.se.conv_reduce.bias:[58]***blocks.5.4.se.conv_expand.weight:[1130, 58, 1, 1]***blocks.5.4.se.conv_expand.bias:[1130]***blocks.5.4.conv_pwl.weight:[232, 1130, 1, 1]***blocks.5.4.bn3.weight:[232]***blocks.5.4.bn3.bias:[232]***blocks.5.4.bn3.running_mean:[232]***blocks.5.4.bn3.running_var:[232]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.5.5.conv_pw.weight:[1266, 232, 1, 1]***blocks.5.5.bn1.weight:[1266]***blocks.5.5.bn1.bias:[1266]***blocks.5.5.bn1.running_mean:[1266]***blocks.5.5.bn1.running_var:[1266]***blocks.5.5.bn1.num_batches_tracked:[]***blocks.5.5.conv_dw.weight:[1266, 1, 5, 5]***blocks.5.5.bn2.weight:[1266]***blocks.5.5.bn2.bias:[1266]***blocks.5.5.bn2.running_mean:[1266]***blocks.5.5.bn2.running_var:[1266]***blocks.5.5.bn2.num_batches_tracked:[]***blocks.5.5.se.conv_reduce.weight:[58, 1266, 1, 1]***blocks.5.5.se.conv_reduce.bias:[58]***blocks.5.5.se.conv_expand.weight:[1266, 58, 1, 1]***blocks.5.5.se.conv_expand.bias:[1266]***blocks.5.5.conv_pwl.weight:[232, 1266, 1, 1]***blocks.5.5.bn3.weight:[232]***blocks.5.5.bn3.bias:[232]***blocks.5.5.bn3.running_mean:[232]***blocks.5.5.bn3.running_var:[232]***blocks.5.5.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1392, 232, 1, 1]***blocks.6.0.bn1.weight:[1392]***blocks.6.0.bn1.bias:[1392]***blocks.6.0.bn1.running_mean:[1392]***blocks.6.0.bn1.running_var:[1392]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1392, 1, 3, 3]***blocks.6.0.bn2.weight:[1392]***blocks.6.0.bn2.bias:[1392]***blocks.6.0.bn2.running_mean:[1392]***blocks.6.0.bn2.running_var:[1392]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[58, 1392, 1, 1]***blocks.6.0.se.conv_reduce.bias:[58]***blocks.6.0.se.conv_expand.weight:[1392, 58, 1, 1]***blocks.6.0.se.conv_expand.bias:[1392]***blocks.6.0.conv_pwl.weight:[384, 1392, 1, 1]***blocks.6.0.bn3.weight:[384]***blocks.6.0.bn3.bias:[384]***blocks.6.0.bn3.running_mean:[384]***blocks.6.0.bn3.running_var:[384]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[2301, 384, 1, 1]***blocks.6.1.bn1.weight:[2301]***blocks.6.1.bn1.bias:[2301]***blocks.6.1.bn1.running_mean:[2301]***blocks.6.1.bn1.running_var:[2301]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[2301, 1, 3, 3]***blocks.6.1.bn2.weight:[2301]***blocks.6.1.bn2.bias:[2301]***blocks.6.1.bn2.running_mean:[2301]***blocks.6.1.bn2.running_var:[2301]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[96, 2301, 1, 1]***blocks.6.1.se.conv_reduce.bias:[96]***blocks.6.1.se.conv_expand.weight:[2301, 96, 1, 1]***blocks.6.1.se.conv_expand.bias:[2301]***blocks.6.1.conv_pwl.weight:[384, 2301, 1, 1]***blocks.6.1.bn3.weight:[384]***blocks.6.1.bn3.bias:[384]***blocks.6.1.bn3.running_mean:[384]***blocks.6.1.bn3.running_var:[384]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1536, 384, 1, 
1]***bn2.weight:[1536]***bn2.bias:[1536]***bn2.running_mean:[1536]***bn2.running_var:[1536]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1536]***classifier.bias:[1000]
0
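The efficientnet_b3_pruned.txt spec above is a flat list of parameter names and their pruned tensor shapes, joined by "***", with empty brackets for scalar buffers such as num_batches_tracked. timm uses files of this form to reshape a stock EfficientNet-B3 definition to the pruned channel counts. A minimal, purely illustrative parser sketch follows; parse_pruned_spec is a hypothetical helper, not a timm API.

# Hypothetical helper, for illustration only: turn the '***'-joined "name:[dims]"
# entries into a {parameter name: shape list} mapping.
from typing import Dict, List

def parse_pruned_spec(text: str) -> Dict[str, List[int]]:
    shapes: Dict[str, List[int]] = {}
    for entry in text.strip().split('***'):
        name, dims = entry.split(':', 1)
        dims = dims.strip()[1:-1]  # drop the surrounding [ ]
        shapes[name] = [int(d) for d in dims.split(',')] if dims else []
    return shapes

spec = 'conv_stem.weight:[40, 3, 3, 3]***bn1.weight:[40]***bn1.num_batches_tracked:[]'
print(parse_pruned_spec(spec))
# {'conv_stem.weight': [40, 3, 3, 3], 'bn1.weight': [40], 'bn1.num_batches_tracked': []}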
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/squeeze_excite.py
""" Squeeze-and-Excitation Channel Attention An SE implementation originally based on PyTorch SE-Net impl. Has since evolved with additional functionality / configuration. Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 Also included is Effective Squeeze-Excitation (ESE). Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 Hacked together by / Copyright 2021 Ross Wightman """ from torch import nn as nn from .create_act import create_act_layer from .helpers import make_divisible class SEModule(nn.Module): """ SE Module as defined in original SE-Nets with a few additions Additions include: * divisor can be specified to keep channels % div == 0 (default: 8) * reduction channels can be specified directly by arg (if rd_channels is set) * reduction channels can be specified by float rd_ratio (default: 1/16) * global max pooling can be added to the squeeze aggregation * customizable activation, normalization, and gate layer """ def __init__( self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, bias=True, act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): super(SEModule, self).__init__() self.add_maxpool = add_maxpool if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=bias) self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity() self.act = create_act_layer(act_layer, inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) if self.add_maxpool: # experimental codepath, may remove or change x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) x_se = self.fc1(x_se) x_se = self.act(self.bn(x_se)) x_se = self.fc2(x_se) return x * self.gate(x_se) SqueezeExcite = SEModule # alias class EffectiveSEModule(nn.Module): """ 'Effective Squeeze-Excitation From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 """ def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_): super(EffectiveSEModule, self).__init__() self.add_maxpool = add_maxpool self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) if self.add_maxpool: # experimental codepath, may remove or change x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True) x_se = self.fc(x_se) return x * self.gate(x_se) EffectiveSqueezeExcite = EffectiveSEModule # alias class SqueezeExciteCl(nn.Module): """ SE Module as defined in original SE-Nets with a few additions Additions include: * divisor can be specified to keep channels % div == 0 (default: 8) * reduction channels can be specified directly by arg (if rd_channels is set) * reduction channels can be specified by float rd_ratio (default: 1/16) * global max pooling can be added to the squeeze aggregation * customizable activation, normalization, and gate layer """ def __init__( self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, bias=True, act_layer=nn.ReLU, gate_layer='sigmoid'): super().__init__() if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
self.fc1 = nn.Linear(channels, rd_channels, bias=bias) self.act = create_act_layer(act_layer, inplace=True) self.fc2 = nn.Linear(rd_channels, channels, bias=bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((1, 2), keepdims=True) # FIXME avg dim [1:n-1], don't assume 2D NHWC x_se = self.fc1(x_se) x_se = self.act(x_se) x_se = self.fc2(x_se) return x * self.gate(x_se)
0
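A short usage sketch for the file above (not part of the source): both SEModule and EffectiveSEModule take an NCHW feature map and return a re-weighted map of the same shape, which is how they are dropped into conv blocks.

import torch
from timm.layers import SEModule, EffectiveSEModule

x = torch.randn(2, 64, 16, 16)
se = SEModule(64, rd_ratio=1. / 16)   # reduce 64 -> make_divisible(4, 8) = 8 channels, then expand back
ese = EffectiveSEModule(64)           # single 1x1 conv + hard-sigmoid gate, no reduction
print(se(x).shape, ese(x).shape)      # torch.Size([2, 64, 16, 16]) for both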
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/__init__.py
from .activations import * from .adaptive_avgmax_pool import \ adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d from .attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding from .blur_pool import BlurPool2d from .classifier import ClassifierHead, create_classifier, NormMlpClassifierHead from .cond_conv2d import CondConv2d, get_condconv_initializer from .config import is_exportable, is_scriptable, is_no_jit, use_fused_attn, \ set_exportable, set_scriptable, set_no_jit, set_layer_config, set_fused_attn from .conv2d_same import Conv2dSame, conv2d_same from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct from .create_act import create_act_layer, get_act_layer, get_act_fn from .create_attn import get_attn, create_attn from .create_conv2d import create_conv2d from .create_norm import get_norm_layer, create_norm_layer from .create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\ EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a from .fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d from .format import Format, get_channel_dim, get_spatial_dim, nchw_to, nhwc_to from .gather_excite import GatherExcite from .global_context import GlobalContext from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple from .inplace_abn import InplaceAbn from .linear import Linear from .mixed_conv2d import MixedConv2d from .mlp import Mlp, GluMlp, GatedMlp, SwiGLU, SwiGLUPacked, ConvMlp, GlobalResponseNormMlp from .non_local_attn import NonLocalAttn, BatNonLocalAttn from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm from .norm_act import BatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d,\ SyncBatchNormAct, convert_sync_batchnorm, FrozenBatchNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d from .padding import get_padding, get_same_padding, pad_same from .patch_dropout import PatchDropout from .patch_embed import PatchEmbed, PatchEmbedWithSize, resample_patch_embed from .pool2d_same import AvgPool2dSame, create_pool2d from .pos_embed import resample_abs_pos_embed, resample_abs_pos_embed_nhwc from .pos_embed_rel import RelPosMlp, RelPosBias, RelPosBiasTf, gen_relative_position_index, gen_relative_log_coords from .pos_embed_sincos import pixel_freq_bands, freq_bands, build_sincos2d_pos_embed, build_fourier_pos_embed, \ build_rotary_pos_embed, apply_rot_embed, apply_rot_embed_cat, apply_rot_embed_list, apply_keep_indices_nlc, \ FourierEmbed, RotaryEmbedding, RotaryEmbeddingCat from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite from .selective_kernel import SelectiveKernel from .separable_conv import SeparableConv2d, SeparableConvNormAct from .space_to_depth import SpaceToDepthModule, SpaceToDepth, DepthToSpace from .split_attn import SplitAttn from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame from .test_time_pool import TestTimePoolHead, apply_test_time_pool from .trace_utils import _assert, _float_to_int from .weight_init import 
trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_
0
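The __init__ above flattens the individual layer modules into a single timm.layers namespace, which is what the model definitions import from. A small sketch of that flat API (the values shown are illustrative only):

import torch
from timm.layers import Mlp, make_divisible, to_2tuple

mlp = Mlp(in_features=192, hidden_features=192 * 4)   # the transformer MLP used across timm ViTs
print(mlp(torch.randn(2, 16, 192)).shape)             # torch.Size([2, 16, 192])
print(make_divisible(37, divisor=8))                  # 40
print(to_2tuple(7))                                   # (7, 7)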
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/blur_pool.py
""" BlurPool layer inspired by - Kornia's Max_BlurPool2d - Making Convolutional Networks Shift-Invariant Again :cite:`zhang2019shiftinvar` Hacked together by Chris Ha and Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from .padding import get_padding class BlurPool2d(nn.Module): r"""Creates a module that computes blurs and downsample a given feature map. See :cite:`zhang2019shiftinvar` for more details. Corresponds to the Downsample class, which does blurring and subsampling Args: channels = Number of input channels filt_size (int): binomial filter size for blurring. currently supports 3 (default) and 5. stride (int): downsampling filter stride Returns: torch.Tensor: the transformed tensor. """ def __init__(self, channels, filt_size=3, stride=2) -> None: super(BlurPool2d, self).__init__() assert filt_size > 1 self.channels = channels self.filt_size = filt_size self.stride = stride self.padding = [get_padding(filt_size, stride, dilation=1)] * 4 coeffs = torch.tensor((np.poly1d((0.5, 0.5)) ** (self.filt_size - 1)).coeffs.astype(np.float32)) blur_filter = (coeffs[:, None] * coeffs[None, :])[None, None, :, :].repeat(self.channels, 1, 1, 1) self.register_buffer('filt', blur_filter, persistent=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = F.pad(x, self.padding, 'reflect') return F.conv2d(x, self.filt, stride=self.stride, groups=self.channels)
0
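Usage sketch for the layer above: BlurPool2d reflect-pads, blurs each channel with a fixed binomial kernel (a depthwise conv), then subsamples with the given stride, so a 56x56 map comes out 28x28.

import torch
from timm.layers import BlurPool2d

pool = BlurPool2d(channels=64, filt_size=3, stride=2)
x = torch.randn(1, 64, 56, 56)
print(pool(x).shape)            # torch.Size([1, 64, 28, 28])
print(pool.filt.shape)          # torch.Size([64, 1, 3, 3]) - one fixed 3x3 kernel per channel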
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/config.py
""" Model / Layer Config singleton state """ import os import warnings from typing import Any, Optional import torch __all__ = [ 'is_exportable', 'is_scriptable', 'is_no_jit', 'use_fused_attn', 'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config', 'set_fused_attn' ] # Set to True if prefer to have layers with no jit optimization (includes activations) _NO_JIT = False # Set to True if prefer to have activation layers with no jit optimization # NOTE not currently used as no difference between no_jit and no_activation jit as only layers obeying # the jit flags so far are activations. This will change as more layers are updated and/or added. _NO_ACTIVATION_JIT = False # Set to True if exporting a model with Same padding via ONNX _EXPORTABLE = False # Set to True if wanting to use torch.jit.script on a model _SCRIPTABLE = False # use torch.scaled_dot_product_attention where possible _HAS_FUSED_ATTN = hasattr(torch.nn.functional, 'scaled_dot_product_attention') if 'TIMM_FUSED_ATTN' in os.environ: _USE_FUSED_ATTN = int(os.environ['TIMM_FUSED_ATTN']) else: _USE_FUSED_ATTN = 1 # 0 == off, 1 == on (for tested use), 2 == on (for experimental use) def is_no_jit(): return _NO_JIT class set_no_jit: def __init__(self, mode: bool) -> None: global _NO_JIT self.prev = _NO_JIT _NO_JIT = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _NO_JIT _NO_JIT = self.prev return False def is_exportable(): return _EXPORTABLE class set_exportable: def __init__(self, mode: bool) -> None: global _EXPORTABLE self.prev = _EXPORTABLE _EXPORTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _EXPORTABLE _EXPORTABLE = self.prev return False def is_scriptable(): return _SCRIPTABLE class set_scriptable: def __init__(self, mode: bool) -> None: global _SCRIPTABLE self.prev = _SCRIPTABLE _SCRIPTABLE = mode def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE _SCRIPTABLE = self.prev return False class set_layer_config: """ Layer config context manager that allows setting all layer config flags at once. If a flag arg is None, it will not change the current value. """ def __init__( self, scriptable: Optional[bool] = None, exportable: Optional[bool] = None, no_jit: Optional[bool] = None, no_activation_jit: Optional[bool] = None): global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT if scriptable is not None: _SCRIPTABLE = scriptable if exportable is not None: _EXPORTABLE = exportable if no_jit is not None: _NO_JIT = no_jit if no_activation_jit is not None: _NO_ACTIVATION_JIT = no_activation_jit def __enter__(self) -> None: pass def __exit__(self, *args: Any) -> bool: global _SCRIPTABLE global _EXPORTABLE global _NO_JIT global _NO_ACTIVATION_JIT _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev return False def use_fused_attn(experimental: bool = False) -> bool: # NOTE: ONNX export cannot handle F.scaled_dot_product_attention as of pytorch 2.0 if not _HAS_FUSED_ATTN or _EXPORTABLE: return False if experimental: return _USE_FUSED_ATTN > 1 return _USE_FUSED_ATTN > 0 def set_fused_attn(enable: bool = True, experimental: bool = False): global _USE_FUSED_ATTN if not _HAS_FUSED_ATTN: warnings.warn('This version of pytorch does not have F.scaled_dot_product_attention, fused_attn flag ignored.') return if experimental and enable: _USE_FUSED_ATTN = 2 elif enable: _USE_FUSED_ATTN = 1 else: _USE_FUSED_ATTN = 0
0
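Sketch of how the flags above are typically driven; the printed values depend on the local PyTorch version and the TIMM_FUSED_ATTN environment variable.

from timm.layers import use_fused_attn, set_fused_attn, set_layer_config, is_exportable

print(use_fused_attn())        # True on PyTorch >= 2.0 unless TIMM_FUSED_ATTN=0 or exporting
with set_layer_config(exportable=True):
    # exportable mode disables F.scaled_dot_product_attention for ONNX friendliness
    print(is_exportable(), use_fused_attn())   # True False
print(is_exportable())         # False again; previous flag state restored on exit

set_fused_attn(False)          # globally fall back to the explicit attention math path
print(use_fused_attn())        # False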
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/linear.py
""" Linear layer (alternate definition) """ import torch import torch.nn.functional as F from torch import nn as nn class Linear(nn.Linear): r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b` Wraps torch.nn.Linear to support AMP + torchscript usage by manually casting weight & bias to input.dtype to work around an issue w/ torch.addmm in this use case. """ def forward(self, input: torch.Tensor) -> torch.Tensor: if torch.jit.is_scripting(): bias = self.bias.to(dtype=input.dtype) if self.bias is not None else None return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) else: return F.linear(input, self.weight, self.bias)
0
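Usage sketch: the class above is a drop-in replacement for nn.Linear; the extra dtype casts only run on the torch.jit.script path, where autocast does not manage weight dtypes for you.

import torch
from timm.layers import Linear

fc = Linear(128, 10)
print(fc(torch.randn(4, 128)).shape)     # torch.Size([4, 10]), identical to nn.Linear here
scripted = torch.jit.script(fc)          # scripted path casts weight/bias to the input dtype
print(scripted(torch.randn(4, 128)).shape)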
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/create_attn.py
""" Attention Factory Hacked together by / Copyright 2021 Ross Wightman """ import torch from functools import partial from .bottleneck_attn import BottleneckAttn from .cbam import CbamModule, LightCbamModule from .eca import EcaModule, CecaModule from .gather_excite import GatherExcite from .global_context import GlobalContext from .halo_attn import HaloAttn from .lambda_layer import LambdaLayer from .non_local_attn import NonLocalAttn, BatNonLocalAttn from .selective_kernel import SelectiveKernel from .split_attn import SplitAttn from .squeeze_excite import SEModule, EffectiveSEModule def get_attn(attn_type): if isinstance(attn_type, torch.nn.Module): return attn_type module_cls = None if attn_type: if isinstance(attn_type, str): attn_type = attn_type.lower() # Lightweight attention modules (channel and/or coarse spatial). # Typically added to existing network architecture blocks in addition to existing convolutions. if attn_type == 'se': module_cls = SEModule elif attn_type == 'ese': module_cls = EffectiveSEModule elif attn_type == 'eca': module_cls = EcaModule elif attn_type == 'ecam': module_cls = partial(EcaModule, use_mlp=True) elif attn_type == 'ceca': module_cls = CecaModule elif attn_type == 'ge': module_cls = GatherExcite elif attn_type == 'gc': module_cls = GlobalContext elif attn_type == 'gca': module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) elif attn_type == 'cbam': module_cls = CbamModule elif attn_type == 'lcbam': module_cls = LightCbamModule # Attention / attention-like modules w/ significant params # Typically replace some of the existing workhorse convs in a network architecture. # All of these accept a stride argument and can spatially downsample the input. elif attn_type == 'sk': module_cls = SelectiveKernel elif attn_type == 'splat': module_cls = SplitAttn # Self-attention / attention-like modules w/ significant compute and/or params # Typically replace some of the existing workhorse convs in a network architecture. # All of these accept a stride argument and can spatially downsample the input. elif attn_type == 'lambda': return LambdaLayer elif attn_type == 'bottleneck': return BottleneckAttn elif attn_type == 'halo': return HaloAttn elif attn_type == 'nl': module_cls = NonLocalAttn elif attn_type == 'bat': module_cls = BatNonLocalAttn # Woops! else: assert False, "Invalid attn module (%s)" % attn_type elif isinstance(attn_type, bool): if attn_type: module_cls = SEModule else: module_cls = attn_type return module_cls def create_attn(attn_type, channels, **kwargs): module_cls = get_attn(attn_type) if module_cls is not None: # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels return module_cls(channels, **kwargs) return None
0
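Factory usage sketch for the file above: get_attn maps a string to a module class (the heavier self-attn variants are returned as classes to be wired in manually), and create_attn instantiates the lightweight ones with the channel count.

import torch
from timm.layers import get_attn, create_attn

se = create_attn('se', 128)          # SEModule(128)
eca = create_attn('eca', 128)        # EcaModule(128)
x = torch.randn(2, 128, 14, 14)
print(se(x).shape, eca(x).shape)     # both torch.Size([2, 128, 14, 14])

print(get_attn('se'))                # <class 'timm.layers.squeeze_excite.SEModule'>
print(create_attn(None, 128))        # None -> no attention layer is added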
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/pos_embed_sincos.py
""" Sin-cos, fourier, rotary position embedding modules and functions Hacked together by / Copyright 2022 Ross Wightman """ import math from typing import List, Tuple, Optional, Union import torch from torch import nn as nn from .trace_utils import _assert def pixel_freq_bands( num_bands: int, max_freq: float = 224., linear_bands: bool = True, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ): if linear_bands: bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=dtype, device=device) else: bands = 2 ** torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=dtype, device=device) return bands * torch.pi def freq_bands( num_bands: int, temperature: float = 10000., step: int = 2, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ) -> torch.Tensor: bands = 1. / (temperature ** (torch.arange(0, num_bands, step, dtype=dtype, device=device) / num_bands)) return bands def build_sincos2d_pos_embed( feat_shape: List[int], dim: int = 64, temperature: float = 10000., reverse_coord: bool = False, interleave_sin_cos: bool = False, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None ) -> torch.Tensor: """ Args: feat_shape: dim: temperature: reverse_coord: stack grid order W, H instead of H, W interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos dtype: device: Returns: """ assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding' pos_dim = dim // 4 bands = freq_bands(pos_dim, temperature=temperature, step=1, dtype=dtype, device=device) if reverse_coord: feat_shape = feat_shape[::-1] # stack W, H instead of H, W grid = torch.stack(torch.meshgrid( [torch.arange(s, device=device, dtype=dtype) for s in feat_shape])).flatten(1).transpose(0, 1) pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0) # FIXME add support for unflattened spatial dim? stack_dim = 2 if interleave_sin_cos else 1 # stack sin, cos, sin, cos instead of sin sin cos cos pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1) return pos_emb def build_fourier_pos_embed( feat_shape: List[int], bands: Optional[torch.Tensor] = None, num_bands: int = 64, max_res: int = 224, temperature: float = 10000., linear_bands: bool = False, include_grid: bool = False, in_pixels: bool = True, ref_feat_shape: Optional[List[int]] = None, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ) -> List[torch.Tensor]: """ Args: feat_shape: Feature shape for embedding. bands: Pre-calculated frequency bands. num_bands: Number of frequency bands (determines output dim). max_res: Maximum resolution for pixel based freq. temperature: Temperature for non-pixel freq. linear_bands: Linear band spacing for pixel based freq. include_grid: Include the spatial grid in output. in_pixels: Output in pixel freq. ref_feat_shape: Reference feature shape for resize / fine-tune. dtype: Output dtype. device: Output device. 
Returns: """ if bands is None: if in_pixels: bands = pixel_freq_bands( num_bands, float(max_res), linear_bands=linear_bands, dtype=dtype, device=device, ) else: bands = freq_bands( num_bands, temperature=temperature, step=1, dtype=dtype, device=device, ) else: if device is None: device = bands.device if dtype is None: dtype = bands.dtype if in_pixels: t = [torch.linspace(-1., 1., steps=s, device=device, dtype=dtype) for s in feat_shape] else: t = [torch.arange(s, device=device, dtype=dtype) for s in feat_shape] if ref_feat_shape is not None: # eva's scheme for resizing rope embeddings (ref shape = pretrain) t = [x / f * r for x, f, r in zip(t, feat_shape, ref_feat_shape)] grid = torch.stack(torch.meshgrid(t), dim=-1) grid = grid.unsqueeze(-1) pos = grid * bands pos_sin, pos_cos = pos.sin(), pos.cos() out = [grid, pos_sin, pos_cos] if include_grid else [pos_sin, pos_cos] return out class FourierEmbed(nn.Module): def __init__( self, max_res: int = 224, num_bands: int = 64, concat_grid=True, keep_spatial=False, ): super().__init__() self.max_res = max_res self.num_bands = num_bands self.concat_grid = concat_grid self.keep_spatial = keep_spatial self.register_buffer( 'bands', pixel_freq_bands(max_res, num_bands), persistent=False, ) def forward(self, x): B, C = x.shape[:2] feat_shape = x.shape[2:] emb = build_fourier_pos_embed( feat_shape, self.bands, include_grid=self.concat_grid, dtype=x.dtype, device=x.device, ) emb = torch.cat(emb, dim=-1) emb = emb.transpose(-1, -2).flatten(len(feat_shape)) batch_expand = (B,) + (-1,) * (x.ndim - 1) # FIXME support nD if self.keep_spatial: x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1) else: x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1) x = x.reshape(B, feat_shape.numel(), -1) return x def rot(x): return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape) def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb): if sin_emb.ndim == 3: return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x) return x * cos_emb + rot(x) * sin_emb def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb): if isinstance(x, torch.Tensor): x = [x] return [t * cos_emb + rot(t) * sin_emb for t in x] def apply_rot_embed_cat(x: torch.Tensor, emb): sin_emb, cos_emb = emb.tensor_split(2, -1) if sin_emb.ndim == 3: return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x) return x * cos_emb + rot(x) * sin_emb def apply_keep_indices_nlc(x, pos_embed, keep_indices): pos_embed = pos_embed.unsqueeze(0).expand(x.shape[0], -1, -1) pos_embed = pos_embed.gather(1, keep_indices.unsqueeze(-1).expand(-1, -1, pos_embed.shape[-1])) return pos_embed def build_rotary_pos_embed( feat_shape: List[int], bands: Optional[torch.Tensor] = None, dim: int = 64, max_res: int = 224, temperature: float = 10000., linear_bands: bool = False, in_pixels: bool = True, ref_feat_shape: Optional[List[int]] = None, dtype: torch.dtype = torch.float32, device: Optional[torch.device] = None, ): """ Args: feat_shape: Spatial shape of the target tensor for embedding. bands: Optional pre-generated frequency bands dim: Output dimension of embedding tensor. max_res: Maximum resolution for pixel mode. temperature: Temperature (inv freq) for non-pixel mode linear_bands: Linearly (instead of log) spaced bands for pixel mode in_pixels: Pixel vs language (inv freq) mode. dtype: Output dtype. device: Output device. 
Returns: """ sin_emb, cos_emb = build_fourier_pos_embed( feat_shape, bands=bands, num_bands=dim // 4, max_res=max_res, temperature=temperature, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=ref_feat_shape, device=device, dtype=dtype, ) num_spatial_dim = 1 # this would be much nicer as a .numel() call to torch.Size(), but torchscript sucks for x in feat_shape: num_spatial_dim *= x sin_emb = sin_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1) cos_emb = cos_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1) return sin_emb, cos_emb class RotaryEmbedding(nn.Module): """ Rotary position embedding NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not been well tested, and will likely change. It will be moved to its own file. The following impl/resources were referenced for this impl: * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py * https://blog.eleuther.ai/rotary-embeddings/ """ def __init__( self, dim, max_res=224, temperature=10000, in_pixels=True, linear_bands: bool = False, feat_shape: Optional[List[int]] = None, ref_feat_shape: Optional[List[int]] = None, ): super().__init__() self.dim = dim self.max_res = max_res self.temperature = temperature self.in_pixels = in_pixels self.feat_shape = feat_shape self.ref_feat_shape = ref_feat_shape if feat_shape is None: # only cache bands if in_pixels: bands = pixel_freq_bands( dim // 4, float(max_res), linear_bands=linear_bands, ) else: bands = freq_bands( dim // 4, temperature=temperature, step=1, ) print(bands) self.register_buffer( 'bands', bands, persistent=False, ) self.pos_embed_sin = None self.pos_embed_cos = None else: # cache full sin/cos embeddings if shape provided up front emb_sin, emb_cos = build_rotary_pos_embed( feat_shape=feat_shape, dim=dim, max_res=max_res, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=self.ref_feat_shape, ) self.bands = None self.register_buffer( 'pos_embed_sin', emb_sin, persistent=False, ) self.register_buffer( 'pos_embed_cos', emb_cos, persistent=False, ) def get_embed(self, shape: Optional[List[int]] = None): if self.bands is not None: # rebuild embeddings every call, use if target shape changes assert shape is not None return build_rotary_pos_embed( shape, self.bands, in_pixels=self.in_pixels, ) else: return self.pos_embed_sin, self.pos_embed_cos def forward(self, x): # assuming channel-first tensor where spatial dim are >= 2 sin_emb, cos_emb = self.get_embed(x.shape[2:]) return apply_rot_embed(x, sin_emb, cos_emb) class RotaryEmbeddingCat(nn.Module): """ Rotary position embedding w/ concatenatd sin & cos The following impl/resources were referenced for this impl: * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py * https://blog.eleuther.ai/rotary-embeddings/ """ def __init__( self, dim, max_res=224, temperature=10000, in_pixels=True, linear_bands: bool = False, feat_shape: Optional[List[int]] = None, ref_feat_shape: Optional[List[int]] = None, ): super().__init__() self.dim = dim self.max_res = max_res self.temperature = temperature self.in_pixels = in_pixels self.feat_shape = feat_shape self.ref_feat_shape = ref_feat_shape if feat_shape is None: # only cache bands if in_pixels: bands = pixel_freq_bands( dim // 4, float(max_res), linear_bands=linear_bands, ) else: bands = freq_bands( dim // 4, temperature=temperature, step=1, ) print(bands) self.register_buffer( 'bands', bands, persistent=False, ) self.embed = 
None else: # cache full sin/cos embeddings if shape provided up front embeds = build_rotary_pos_embed( feat_shape=feat_shape, dim=dim, max_res=max_res, linear_bands=linear_bands, in_pixels=in_pixels, ref_feat_shape=self.ref_feat_shape, ) self.bands = None self.register_buffer( 'pos_embed', torch.cat(embeds, -1), persistent=False, ) def get_embed(self, shape: Optional[List[int]] = None): if self.bands is not None: # rebuild embeddings every call, use if target shape changes _assert(shape is not None, 'valid shape needed') embeds = build_rotary_pos_embed( shape, self.bands, in_pixels=self.in_pixels, ) return torch.cat(embeds, -1) else: return self.pos_embed def forward(self, x): # assuming channel-first tensor where spatial dim are >= 2 pos_embed = self.get_embed(x.shape[2:]) return apply_rot_embed_cat(x, pos_embed)
0
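A sketch of the functional and cached-module paths above, assuming an 8x8 token grid and embedding dim 64: build_rotary_pos_embed returns per-position sin/cos tables of shape (H*W, dim), and RotaryEmbeddingCat caches the concatenated table when feat_shape is given up front.

import torch
from timm.layers import build_rotary_pos_embed, apply_rot_embed, RotaryEmbeddingCat, apply_rot_embed_cat

sin_emb, cos_emb = build_rotary_pos_embed(feat_shape=[8, 8], dim=64)
print(sin_emb.shape, cos_emb.shape)      # torch.Size([64, 64]) each: (H*W, dim)

tokens = torch.randn(2, 8 * 8, 64)       # (batch, seq, dim)
rotated = apply_rot_embed(tokens, sin_emb, cos_emb)
print(rotated.shape)                     # torch.Size([2, 64, 64])

rope = RotaryEmbeddingCat(dim=64, in_pixels=False, feat_shape=[8, 8])   # caches sin||cos table
print(apply_rot_embed_cat(tokens, rope.get_embed()).shape)              # torch.Size([2, 64, 64])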
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/create_act.py
""" Activation Factory Hacked together by / Copyright 2020 Ross Wightman """ from typing import Union, Callable, Type from .activations import * from .activations_jit import * from .activations_me import * from .config import is_exportable, is_scriptable, is_no_jit # PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7. # Also hardsigmoid, hardswish, and soon mish. This code will use native version if present. # Eventually, the custom SiLU, Mish, Hard*, layers will be removed and only native variants will be used. _has_silu = 'silu' in dir(torch.nn.functional) _has_hardswish = 'hardswish' in dir(torch.nn.functional) _has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional) _has_mish = 'mish' in dir(torch.nn.functional) _ACT_FN_DEFAULT = dict( silu=F.silu if _has_silu else swish, swish=F.silu if _has_silu else swish, mish=F.mish if _has_mish else mish, relu=F.relu, relu6=F.relu6, leaky_relu=F.leaky_relu, elu=F.elu, celu=F.celu, selu=F.selu, gelu=gelu, gelu_tanh=gelu_tanh, sigmoid=sigmoid, tanh=tanh, hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid, hard_swish=F.hardswish if _has_hardswish else hard_swish, hard_mish=hard_mish, ) _ACT_FN_JIT = dict( silu=F.silu if _has_silu else swish_jit, swish=F.silu if _has_silu else swish_jit, mish=F.mish if _has_mish else mish_jit, hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit, hard_swish=F.hardswish if _has_hardswish else hard_swish_jit, hard_mish=hard_mish_jit ) _ACT_FN_ME = dict( silu=F.silu if _has_silu else swish_me, swish=F.silu if _has_silu else swish_me, mish=F.mish if _has_mish else mish_me, hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me, hard_swish=F.hardswish if _has_hardswish else hard_swish_me, hard_mish=hard_mish_me, ) _ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT) for a in _ACT_FNS: a.setdefault('hardsigmoid', a.get('hard_sigmoid')) a.setdefault('hardswish', a.get('hard_swish')) _ACT_LAYER_DEFAULT = dict( silu=nn.SiLU if _has_silu else Swish, swish=nn.SiLU if _has_silu else Swish, mish=nn.Mish if _has_mish else Mish, relu=nn.ReLU, relu6=nn.ReLU6, leaky_relu=nn.LeakyReLU, elu=nn.ELU, prelu=PReLU, celu=nn.CELU, selu=nn.SELU, gelu=GELU, gelu_tanh=GELUTanh, sigmoid=Sigmoid, tanh=Tanh, hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid, hard_swish=nn.Hardswish if _has_hardswish else HardSwish, hard_mish=HardMish, identity=nn.Identity, ) _ACT_LAYER_JIT = dict( silu=nn.SiLU if _has_silu else SwishJit, swish=nn.SiLU if _has_silu else SwishJit, mish=nn.Mish if _has_mish else MishJit, hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit, hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit, hard_mish=HardMishJit ) _ACT_LAYER_ME = dict( silu=nn.SiLU if _has_silu else SwishMe, swish=nn.SiLU if _has_silu else SwishMe, mish=nn.Mish if _has_mish else MishMe, hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe, hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe, hard_mish=HardMishMe, ) _ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT) for a in _ACT_LAYERS: a.setdefault('hardsigmoid', a.get('hard_sigmoid')) a.setdefault('hardswish', a.get('hard_swish')) def get_act_fn(name: Union[Callable, str] = 'relu'): """ Activation Function Factory Fetching activation fns by name with this function allows export or torch script friendly functions to be returned dynamically based on current config. 
""" if not name: return None if isinstance(name, Callable): return name if not (is_no_jit() or is_exportable() or is_scriptable()): # If not exporting or scripting the model, first look for a memory-efficient version with # custom autograd, then fallback if name in _ACT_FN_ME: return _ACT_FN_ME[name] if not (is_no_jit() or is_exportable()): if name in _ACT_FN_JIT: return _ACT_FN_JIT[name] return _ACT_FN_DEFAULT[name] def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'): """ Activation Layer Factory Fetching activation layers by name with this function allows export or torch script friendly functions to be returned dynamically based on current config. """ if not name: return None if not isinstance(name, str): # callable, module, etc return name if not (is_no_jit() or is_exportable() or is_scriptable()): if name in _ACT_LAYER_ME: return _ACT_LAYER_ME[name] if not (is_no_jit() or is_exportable()): if name in _ACT_LAYER_JIT: return _ACT_LAYER_JIT[name] return _ACT_LAYER_DEFAULT[name] def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs): act_layer = get_act_layer(name) if act_layer is None: return None if inplace is None: return act_layer(**kwargs) try: return act_layer(inplace=inplace, **kwargs) except TypeError: # recover if act layer doesn't have inplace arg return act_layer(**kwargs)
0
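Factory usage sketch for the file above; which concrete classes/functions are resolved depends on the PyTorch version and the jit/export flags.

import torch
from timm.layers import get_act_layer, get_act_fn, create_act_layer

act_cls = get_act_layer('silu')              # nn.SiLU on recent PyTorch, else timm's Swish
act = create_act_layer('hard_swish', inplace=True)
fn = get_act_fn('gelu_tanh')
print(act_cls, type(act).__name__)
print(fn(torch.linspace(-2, 2, 5)))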
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/split_attn.py
""" Split Attention Conv2d (for ResNeSt Models) Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt Modified for torchscript compat, performance, and consistency with timm by Ross Wightman """ import torch import torch.nn.functional as F from torch import nn from .helpers import make_divisible class RadixSoftmax(nn.Module): def __init__(self, radix, cardinality): super(RadixSoftmax, self).__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplitAttn(nn.Module): """Split-Attention (aka Splat) """ def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, act_layer=nn.ReLU, norm_layer=None, drop_layer=None, **kwargs): super(SplitAttn, self).__init__() out_channels = out_channels or in_channels self.radix = radix mid_chs = out_channels * radix if rd_channels is None: attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) else: attn_chs = rd_channels * radix padding = kernel_size // 2 if padding is None else padding self.conv = nn.Conv2d( in_channels, mid_chs, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act0 = act_layer(inplace=True) self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() self.act1 = act_layer(inplace=True) self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) self.rsoftmax = RadixSoftmax(radix, groups) def forward(self, x): x = self.conv(x) x = self.bn0(x) x = self.drop(x) x = self.act0(x) B, RC, H, W = x.shape if self.radix > 1: x = x.reshape((B, self.radix, RC // self.radix, H, W)) x_gap = x.sum(dim=1) else: x_gap = x x_gap = x_gap.mean((2, 3), keepdim=True) x_gap = self.fc1(x_gap) x_gap = self.bn1(x_gap) x_gap = self.act1(x_gap) x_attn = self.fc2(x_gap) x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) if self.radix > 1: out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) else: out = x * x_attn return out.contiguous()
0
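Usage sketch with radix=2 as in ResNeSt; norm_layer defaults to None (Identity), so BatchNorm2d is passed explicitly here. The output keeps the input spatial size for stride=1 and the out_channels channel count.

import torch
import torch.nn as nn
from timm.layers import SplitAttn

splat = SplitAttn(64, 64, kernel_size=3, stride=1, radix=2, norm_layer=nn.BatchNorm2d)
x = torch.randn(2, 64, 28, 28)
print(splat(x).shape)      # torch.Size([2, 64, 28, 28])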
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/weight_init.py
import torch import math import warnings from torch.nn.init import _calculate_fan_in_and_fan_out def _trunc_normal_(tensor, mean, std, a, b): # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf def norm_cdf(x): # Computes standard normal cumulative distribution function return (1. + math.erf(x / math.sqrt(2.))) / 2. if (mean < a - 2 * std) or (mean > b + 2 * std): warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " "The distribution of values may be incorrect.", stacklevel=2) # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values l = norm_cdf((a - mean) / std) u = norm_cdf((b - mean) / std) # Uniformly fill tensor with values from [l, u], then translate to # [2l-1, 2u-1]. tensor.uniform_(2 * l - 1, 2 * u - 1) # Use inverse cdf transform for normal distribution to get truncated # standard normal tensor.erfinv_() # Transform to proper mean, std tensor.mul_(std * math.sqrt(2.)) tensor.add_(mean) # Clamp to ensure it's in the proper range tensor.clamp_(min=a, max=b) return tensor def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): # type: (Tensor, float, float, float, float) -> Tensor r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are applied while sampling the normal with mean/std applied, therefore a, b args should be adjusted to match the range of mean, std args. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) """ with torch.no_grad(): return _trunc_normal_(tensor, mean, std, a, b) def trunc_normal_tf_(tensor, mean=0., std=1., a=-2., b=2.): # type: (Tensor, float, float, float, float) -> Tensor r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0 and the result is subsquently scaled and shifted by the mean and std args. 
Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> trunc_normal_tf_(w) """ with torch.no_grad(): _trunc_normal_(tensor, 0, 1.0, a, b) tensor.mul_(std).add_(mean) return tensor def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'): fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) if mode == 'fan_in': denom = fan_in elif mode == 'fan_out': denom = fan_out elif mode == 'fan_avg': denom = (fan_in + fan_out) / 2 variance = scale / denom if distribution == "truncated_normal": # constant is stddev of standard normal truncated to (-2, 2) trunc_normal_tf_(tensor, std=math.sqrt(variance) / .87962566103423978) elif distribution == "normal": with torch.no_grad(): tensor.normal_(std=math.sqrt(variance)) elif distribution == "uniform": bound = math.sqrt(3 * variance) with torch.no_grad(): tensor.uniform_(-bound, bound) else: raise ValueError(f"invalid distribution {distribution}") def lecun_normal_(tensor): variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
0
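Typical use of the initializers above is inside a model's weight-init routine; the std values below are illustrative only.

import torch.nn as nn
from timm.layers import trunc_normal_, variance_scaling_, lecun_normal_

fc = nn.Linear(768, 768)
trunc_normal_(fc.weight, std=.02)     # ViT-style init; the default [-2, 2] bounds apply after scaling by std
nn.init.zeros_(fc.bias)

conv = nn.Conv2d(3, 96, kernel_size=4)
lecun_normal_(conv.weight)            # scale=1, fan_in, truncated-normal samples

proj = nn.Linear(96, 96)
variance_scaling_(proj.weight, scale=2.0, mode='fan_out', distribution='uniform')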
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/separable_conv.py
""" Depthwise Separable Conv Modules Basic DWS convs. Other variations of DWS exist with batch norm or activations between the DW and PW convs such as the Depthwise modules in MobileNetV2 / EfficientNet and Xception. Hacked together by / Copyright 2020 Ross Wightman """ from torch import nn as nn from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer class SeparableConvNormAct(nn.Module): """ Separable Conv w/ trailing Norm and Activation """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1, norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU, apply_act=True, drop_layer=None): super(SeparableConvNormAct, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) norm_act_layer = get_norm_act_layer(norm_layer, act_layer) norm_kwargs = dict(drop_layer=drop_layer) if drop_layer is not None else {} self.bn = norm_act_layer(out_channels, apply_act=apply_act, **norm_kwargs) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) x = self.bn(x) return x SeparableConvBnAct = SeparableConvNormAct class SeparableConv2d(nn.Module): """ Separable Conv """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, channel_multiplier=1.0, pw_kernel_size=1): super(SeparableConv2d, self).__init__() self.conv_dw = create_conv2d( in_channels, int(in_channels * channel_multiplier), kernel_size, stride=stride, dilation=dilation, padding=padding, depthwise=True) self.conv_pw = create_conv2d( int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) @property def in_channels(self): return self.conv_dw.in_channels @property def out_channels(self): return self.conv_pw.out_channels def forward(self, x): x = self.conv_dw(x) x = self.conv_pw(x) return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/evo_norm.py
""" EvoNorm in PyTorch Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967 @inproceedings{NEURIPS2020, author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc}, booktitle = {Advances in Neural Information Processing Systems}, editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin}, pages = {13539--13550}, publisher = {Curran Associates, Inc.}, title = {Evolving Normalization-Activation Layers}, url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf}, volume = {33}, year = {2020} } An attempt at getting decent performing EvoNorms running in PyTorch. While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm in terms of memory usage and throughput on GPUs. I'm testing these modules on TPU w/ PyTorch XLA. Promising start but currently working around some issues with builtin torch/tensor.var/std. Unlike GPU, similar train speeds for EvoNormS variants and BatchNorm. Hacked together by / Copyright 2020 Ross Wightman """ from typing import Sequence, Union import torch import torch.nn as nn import torch.nn.functional as F from .create_act import create_act_layer from .trace_utils import _assert def instance_std(x, eps: float = 1e-5): std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype) return std.expand(x.shape) def instance_std_tpu(x, eps: float = 1e-5): std = manual_var(x, dim=(2, 3)).add(eps).sqrt() return std.expand(x.shape) # instance_std = instance_std_tpu def instance_rms(x, eps: float = 1e-5): rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype) return rms.expand(x.shape) def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False): xm = x.mean(dim=dim, keepdim=True) if diff_sqm: # difference of squared mean and mean squared, faster on TPU can be less stable var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0) else: var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True) return var def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False): B, C, H, W = x.shape x_dtype = x.dtype _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) else: x = x.reshape(B, groups, C // groups, H, W) std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) return std.expand(x.shape).reshape(B, C, H, W) def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False): # This is a workaround for some stability / odd behaviour of .var and .std # running on PyTorch XLA w/ TPUs. 
These manual var impl are producing much better results B, C, H, W = x.shape _assert(C % groups == 0, '') if flatten: x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues var = manual_var(x, dim=-1, diff_sqm=diff_sqm) else: x = x.reshape(B, groups, C // groups, H, W) var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm) return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W) #group_std = group_std_tpu # FIXME TPU temporary def group_rms(x, groups: int = 32, eps: float = 1e-5): B, C, H, W = x.shape _assert(C % groups == 0, '') x_dtype = x.dtype x = x.reshape(B, groups, C // groups, H, W) rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype) return rms.expand(x.shape).reshape(B, C, H, W) class EvoNorm2dB0(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) # var = manual_var(x, dim=(0, 2, 3)).squeeze() n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach() * self.momentum * (n / (n - 1))) else: var = self.running_var left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x) v = self.v.to(x_dtype).view(v_shape) right = x * v + instance_std(x, self.eps) x = x / left.max(right) return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape) class EvoNorm2dB1(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = (x + 1) * instance_rms(x, self.eps) x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dB2(nn.Module): def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) self.momentum = momentum self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.register_buffer('running_var', 
torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: if self.training: var = x.float().var(dim=(0, 2, 3), unbiased=False) n = x.numel() / x.shape[1] self.running_var.copy_( self.running_var * (1 - self.momentum) + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) else: var = self.running_var var = var.to(x_dtype).view(v_shape) left = var.add(self.eps).sqrt_() right = instance_rms(x, self.eps) - x x = x / left.max(right) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0(nn.Module): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_): super().__init__() self.apply_act = apply_act # apply activation (non-linearity) if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.v is not None: nn.init.ones_(self.v) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS0a(EvoNorm2dS0): def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) d = group_std(x, self.groups, self.eps) if self.v is not None: v = self.v.view(v_shape).to(x_dtype) x = x * (x * v).sigmoid() x = x / d return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1(nn.Module): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-5, **_): super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act # apply activation (non-linearity) if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.pre_act_norm = False self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS1a(EvoNorm2dS1): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def 
forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_std(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2(nn.Module): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-5, **_): super().__init__() act_layer = act_layer or nn.SiLU self.apply_act = apply_act # apply activation (non-linearity) if act_layer is not None and apply_act: self.act = create_act_layer(act_layer) else: self.act = nn.Identity() if group_size: assert num_features % group_size == 0 self.groups = num_features // group_size else: self.groups = groups self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) if self.apply_act: x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) class EvoNorm2dS2a(EvoNorm2dS2): def __init__( self, num_features, groups=32, group_size=None, apply_act=True, act_layer=None, eps=1e-3, **_): super().__init__( num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = self.act(x) / group_rms(x, self.groups, self.eps) return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype)
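A brief sketch showing the EvoNorm layers above used as drop-in replacements for a Norm + Activation pair; classes are imported from the file path in this row.

import torch
from timm.layers.evo_norm import EvoNorm2dB0, EvoNorm2dS0

x = torch.randn(2, 64, 32, 32)

# Batch-statistic variant: keeps a running_var buffer, BatchNorm-like behaviour.
norm_b0 = EvoNorm2dB0(64)
print(norm_b0(x).shape)    # expected: torch.Size([2, 64, 32, 32])

# Sample-statistic variant: group based, no running stats; groups must divide C.
norm_s0 = EvoNorm2dS0(64, groups=16)
print(norm_s0(x).shape)    # expected: torch.Size([2, 64, 32, 32])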
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/global_context.py
""" Global Context Attention Block Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` - https://arxiv.org/abs/1904.11492 Official code consulted as reference: https://github.com/xvjiarui/GCNet Hacked together by / Copyright 2021 Ross Wightman """ from torch import nn as nn import torch.nn.functional as F from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible from .mlp import ConvMlp from .norm import LayerNorm2d class GlobalContext(nn.Module): def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): super(GlobalContext, self).__init__() act_layer = get_act_layer(act_layer) self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None if rd_channels is None: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) if fuse_add: self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_add = None if fuse_scale: self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) else: self.mlp_scale = None self.gate = create_act_layer(gate_layer) self.init_last_zero = init_last_zero self.reset_parameters() def reset_parameters(self): if self.conv_attn is not None: nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') if self.mlp_add is not None: nn.init.zeros_(self.mlp_add.fc2.weight) def forward(self, x): B, C, H, W = x.shape if self.conv_attn is not None: attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) context = x.reshape(B, C, H * W).unsqueeze(1) @ attn context = context.view(B, C, 1, 1) else: context = x.mean(dim=(2, 3), keepdim=True) if self.mlp_scale is not None: mlp_x = self.mlp_scale(context) x = x * self.gate(mlp_x) if self.mlp_add is not None: mlp_x = self.mlp_add(context) x = x + mlp_x return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/test_time_pool.py
""" Test Time Pooling (Average-Max Pool) Hacked together by / Copyright 2020 Ross Wightman """ import logging from torch import nn import torch.nn.functional as F from .adaptive_avgmax_pool import adaptive_avgmax_pool2d _logger = logging.getLogger(__name__) class TestTimePoolHead(nn.Module): def __init__(self, base, original_pool=7): super(TestTimePoolHead, self).__init__() self.base = base self.original_pool = original_pool base_fc = self.base.get_classifier() if isinstance(base_fc, nn.Conv2d): self.fc = base_fc else: self.fc = nn.Conv2d( self.base.num_features, self.base.num_classes, kernel_size=1, bias=True) self.fc.weight.data.copy_(base_fc.weight.data.view(self.fc.weight.size())) self.fc.bias.data.copy_(base_fc.bias.data.view(self.fc.bias.size())) self.base.reset_classifier(0) # delete original fc layer def forward(self, x): x = self.base.forward_features(x) x = F.avg_pool2d(x, kernel_size=self.original_pool, stride=1) x = self.fc(x) x = adaptive_avgmax_pool2d(x, 1) return x.view(x.size(0), -1) def apply_test_time_pool(model, config, use_test_size=False): test_time_pool = False if not hasattr(model, 'default_cfg') or not model.default_cfg: return model, False if use_test_size and 'test_input_size' in model.default_cfg: df_input_size = model.default_cfg['test_input_size'] else: df_input_size = model.default_cfg['input_size'] if config['input_size'][-1] > df_input_size[-1] and config['input_size'][-2] > df_input_size[-2]: _logger.info('Target input size %s > pretrained default %s, using test time pooling' % (str(config['input_size'][-2:]), str(df_input_size[-2:]))) model = TestTimePoolHead(model, original_pool=model.default_cfg['pool_size']) test_time_pool = True return model, test_time_pool
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/activations_jit.py
""" Activations A collection of jit-scripted activations fn and modules with a common interface so that they can easily be swapped. All have an `inplace` arg even if not used. All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted versions if they contain in-place ops. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from torch.nn import functional as F @torch.jit.script def swish_jit(x, inplace: bool = False): """Swish - Described in: https://arxiv.org/abs/1710.05941 """ return x.mul(x.sigmoid()) @torch.jit.script def mish_jit(x, _inplace: bool = False): """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 """ return x.mul(F.softplus(x).tanh()) class SwishJit(nn.Module): def __init__(self, inplace: bool = False): super(SwishJit, self).__init__() def forward(self, x): return swish_jit(x) class MishJit(nn.Module): def __init__(self, inplace: bool = False): super(MishJit, self).__init__() def forward(self, x): return mish_jit(x) @torch.jit.script def hard_sigmoid_jit(x, inplace: bool = False): # return F.relu6(x + 3.) / 6. return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? class HardSigmoidJit(nn.Module): def __init__(self, inplace: bool = False): super(HardSigmoidJit, self).__init__() def forward(self, x): return hard_sigmoid_jit(x) @torch.jit.script def hard_swish_jit(x, inplace: bool = False): # return x * (F.relu6(x + 3.) / 6) return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster? class HardSwishJit(nn.Module): def __init__(self, inplace: bool = False): super(HardSwishJit, self).__init__() def forward(self, x): return hard_swish_jit(x) @torch.jit.script def hard_mish_jit(x, inplace: bool = False): """ Hard Mish Experimental, based on notes by Mish author Diganta Misra at https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md """ return 0.5 * x * (x + 2).clamp(min=0, max=2) class HardMishJit(nn.Module): def __init__(self, inplace: bool = False): super(HardMishJit, self).__init__() def forward(self, x): return hard_mish_jit(x)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/mlp.py
""" MLP module w/ dropout and configurable activation layer Hacked together by / Copyright 2020 Ross Wightman """ from functools import partial from torch import nn as nn from .grn import GlobalResponseNorm from .helpers import to_2tuple class Mlp(nn.Module): """ MLP as used in Vision Transformer, MLP-Mixer and related networks """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, bias=True, drop=0., use_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.fc2(x) x = self.drop2(x) return x class GluMlp(nn.Module): """ MLP w/ GLU style gating See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202 """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, norm_layer=None, bias=True, drop=0., use_conv=False, gate_last=True, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features assert hidden_features % 2 == 0 bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.chunk_dim = 1 if use_conv else -1 self.gate_last = gate_last # use second half of width for gate self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features // 2) if norm_layer is not None else nn.Identity() self.fc2 = linear_layer(hidden_features // 2, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def init_weights(self): # override init of fc1 w/ gate portion set to weight near zero, bias=1 fc1_mid = self.fc1.bias.shape[0] // 2 nn.init.ones_(self.fc1.bias[fc1_mid:]) nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6) def forward(self, x): x = self.fc1(x) x1, x2 = x.chunk(2, dim=self.chunk_dim) x = x1 * self.act(x2) if self.gate_last else self.act(x1) * x2 x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x SwiGLUPacked = partial(GluMlp, act_layer=nn.SiLU, gate_last=False) class SwiGLU(nn.Module): """ SwiGLU NOTE: GluMLP above can implement SwiGLU, but this impl has split fc1 and better matches some other common impl which makes mapping checkpoints simpler. 
""" def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, norm_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1_g = nn.Linear(in_features, hidden_features, bias=bias[0]) self.fc1_x = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) self.drop = nn.Dropout(drop) def init_weights(self): # override init of fc1 w/ gate portion set to weight near zero, bias=1 nn.init.ones_(self.fc1_g.bias) nn.init.normal_(self.fc1_g.weight, std=1e-6) def forward(self, x): x_gate = self.fc1_g(x) x = self.fc1_x(x) x = self.act(x_gate) * x x = self.drop1(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class GatedMlp(nn.Module): """ MLP as used in gMLP """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=None, gate_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) self.fc1 = nn.Linear(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) if gate_layer is not None: assert hidden_features % 2 == 0 self.gate = gate_layer(hidden_features) hidden_features = hidden_features // 2 # FIXME base reduction on gate property? else: self.gate = nn.Identity() self.norm = norm_layer(hidden_features) if norm_layer is not None else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.gate(x) x = self.norm(x) x = self.fc2(x) x = self.drop2(x) return x class ConvMlp(nn.Module): """ MLP using 1x1 convs that keeps spatial dims """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, bias=True, drop=0., ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0]) self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity() self.act = act_layer() self.drop = nn.Dropout(drop) self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1]) def forward(self, x): x = self.fc1(x) x = self.norm(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) return x class GlobalResponseNormMlp(nn.Module): """ MLP w/ Global Response Norm (see grn.py), nn.Linear or 1x1 Conv2d """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, bias=True, drop=0., use_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features bias = to_2tuple(bias) drop_probs = to_2tuple(drop) linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear self.fc1 = linear_layer(in_features, hidden_features, bias=bias[0]) self.act = act_layer() self.drop1 = nn.Dropout(drop_probs[0]) self.grn = GlobalResponseNorm(hidden_features, channels_last=not use_conv) self.fc2 = 
linear_layer(hidden_features, out_features, bias=bias[1]) self.drop2 = nn.Dropout(drop_probs[1]) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop1(x) x = self.grn(x) x = self.fc2(x) x = self.drop2(x) return x
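A usage sketch for the MLP variants above on a (batch, tokens, dim) input, imported from the file path in this row.

import torch
from timm.layers.mlp import Mlp, GluMlp, SwiGLU

x = torch.randn(2, 197, 384)

mlp = Mlp(384, hidden_features=1536, drop=0.1)    # fc1 -> GELU -> fc2
print(mlp(x).shape)                               # expected: torch.Size([2, 197, 384])

glu = GluMlp(384, hidden_features=1536)           # hidden width must be even (half is the gate)
print(glu(x).shape)                               # expected: torch.Size([2, 197, 384])

swiglu = SwiGLU(384, hidden_features=1024)        # separate fc1_g / fc1_x branches
print(swiglu(x).shape)                            # expected: torch.Size([2, 197, 384])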
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/mixed_conv2d.py
""" PyTorch Mixed Convolution Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from .conv2d_same import create_conv2d_pad def _split_channels(num_chan, num_groups): split = [num_chan // num_groups for _ in range(num_groups)] split[0] += num_chan - sum(split) return split class MixedConv2d(nn.ModuleDict): """ Mixed Grouped Convolution Based on MDConv and GroupedConv in MixNet impl: https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, depthwise=False, **kwargs): super(MixedConv2d, self).__init__() kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] num_groups = len(kernel_size) in_splits = _split_channels(in_channels, num_groups) out_splits = _split_channels(out_channels, num_groups) self.in_channels = sum(in_splits) self.out_channels = sum(out_splits) for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): conv_groups = in_ch if depthwise else 1 # use add_module to keep key space clean self.add_module( str(idx), create_conv2d_pad( in_ch, out_ch, k, stride=stride, padding=padding, dilation=dilation, groups=conv_groups, **kwargs) ) self.splits = in_splits def forward(self, x): x_split = torch.split(x, self.splits, 1) x_out = [c(x_split[i]) for i, c in enumerate(self.values())] x = torch.cat(x_out, 1) return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/cbam.py
""" CBAM (sort-of) Attention Experimental impl of CBAM: Convolutional Block Attention Module: https://arxiv.org/abs/1807.06521 WARNING: Results with these attention layers have been mixed. They can significantly reduce performance on some tasks, especially fine-grained it seems. I may end up removing this impl. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn import torch.nn.functional as F from .conv_bn_act import ConvNormAct from .create_act import create_act_layer, get_act_layer from .helpers import make_divisible class ChannelAttn(nn.Module): """ Original CBAM channel attention module, currently avg + max pool variant only. """ def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(ChannelAttn, self).__init__() if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias) self.act = act_layer(inplace=True) self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias) self.gate = create_act_layer(gate_layer) def forward(self, x): x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True)))) x_max = self.fc2(self.act(self.fc1(x.amax((2, 3), keepdim=True)))) return x * self.gate(x_avg + x_max) class LightChannelAttn(ChannelAttn): """An experimental 'lightweight' that sums avg + max pool first """ def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightChannelAttn, self).__init__( channels, rd_ratio, rd_channels, rd_divisor, act_layer, gate_layer, mlp_bias) def forward(self, x): x_pool = 0.5 * x.mean((2, 3), keepdim=True) + 0.5 * x.amax((2, 3), keepdim=True) x_attn = self.fc2(self.act(self.fc1(x_pool))) return x * F.sigmoid(x_attn) class SpatialAttn(nn.Module): """ Original CBAM spatial attention module """ def __init__(self, kernel_size=7, gate_layer='sigmoid'): super(SpatialAttn, self).__init__() self.conv = ConvNormAct(2, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = torch.cat([x.mean(dim=1, keepdim=True), x.amax(dim=1, keepdim=True)], dim=1) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class LightSpatialAttn(nn.Module): """An experimental 'lightweight' variant that sums avg_pool and max_pool results. 
""" def __init__(self, kernel_size=7, gate_layer='sigmoid'): super(LightSpatialAttn, self).__init__() self.conv = ConvNormAct(1, 1, kernel_size, apply_act=False) self.gate = create_act_layer(gate_layer) def forward(self, x): x_attn = 0.5 * x.mean(dim=1, keepdim=True) + 0.5 * x.amax(dim=1, keepdim=True) x_attn = self.conv(x_attn) return x * self.gate(x_attn) class CbamModule(nn.Module): def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(CbamModule, self).__init__() self.channel = ChannelAttn( channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x class LightCbamModule(nn.Module): def __init__( self, channels, rd_ratio=1./16, rd_channels=None, rd_divisor=1, spatial_kernel_size=7, act_layer=nn.ReLU, gate_layer='sigmoid', mlp_bias=False): super(LightCbamModule, self).__init__() self.channel = LightChannelAttn( channels, rd_ratio=rd_ratio, rd_channels=rd_channels, rd_divisor=rd_divisor, act_layer=act_layer, gate_layer=gate_layer, mlp_bias=mlp_bias) self.spatial = LightSpatialAttn(spatial_kernel_size) def forward(self, x): x = self.channel(x) x = self.spatial(x) return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/patch_embed.py
""" Image to Patch Embedding using Conv2d A convolution based approach to patchifying a 2D image w/ embedding projection. Based on code in: * https://github.com/google-research/vision_transformer * https://github.com/google-research/big_vision/tree/main/big_vision Hacked together by / Copyright 2020 Ross Wightman """ import logging from typing import Callable, List, Optional, Tuple, Union import torch from torch import nn as nn import torch.nn.functional as F from .format import Format, nchw_to from .helpers import to_2tuple from .trace_utils import _assert _logger = logging.getLogger(__name__) class PatchEmbed(nn.Module): """ 2D Image to Patch Embedding """ output_fmt: Format def __init__( self, img_size: Optional[int] = 224, patch_size: int = 16, in_chans: int = 3, embed_dim: int = 768, norm_layer: Optional[Callable] = None, flatten: bool = True, output_fmt: Optional[str] = None, bias: bool = True, strict_img_size: bool = True, ): super().__init__() self.patch_size = to_2tuple(patch_size) if img_size is not None: self.img_size = to_2tuple(img_size) self.grid_size = tuple([s // p for s, p in zip(self.img_size, self.patch_size)]) self.num_patches = self.grid_size[0] * self.grid_size[1] else: self.img_size = None self.grid_size = None self.num_patches = None if output_fmt is not None: self.flatten = False self.output_fmt = Format(output_fmt) else: # flatten spatial dim and transpose to channels last, kept for bwd compat self.flatten = flatten self.output_fmt = Format.NCHW self.strict_img_size = strict_img_size self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() def forward(self, x): B, C, H, W = x.shape if self.img_size is not None: if self.strict_img_size: _assert(H == self.img_size[0], f"Input height ({H}) doesn't match model ({self.img_size[0]}).") _assert(W == self.img_size[1], f"Input width ({W}) doesn't match model ({self.img_size[1]}).") else: _assert( H % self.patch_size[0] == 0, f"Input height ({H}) should be divisible by patch size ({self.patch_size[0]})." ) _assert( W % self.patch_size[1] == 0, f"Input width ({W}) should be divisible by patch size ({self.patch_size[1]})." 
) x = self.proj(x) if self.flatten: x = x.flatten(2).transpose(1, 2) # NCHW -> NLC elif self.output_fmt != Format.NCHW: x = nchw_to(x, self.output_fmt) x = self.norm(x) return x class PatchEmbedWithSize(PatchEmbed): """ 2D Image to Patch Embedding """ output_fmt: Format def __init__( self, img_size: Optional[int] = 224, patch_size: int = 16, in_chans: int = 3, embed_dim: int = 768, norm_layer: Optional[Callable] = None, flatten: bool = True, output_fmt: Optional[str] = None, bias: bool = True, ): super().__init__( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=norm_layer, flatten=flatten, output_fmt=output_fmt, bias=bias, ) def forward(self, x) -> Tuple[torch.Tensor, List[int]]: B, C, H, W = x.shape if self.img_size is not None: _assert(H % self.patch_size[0] == 0, f"Input image height ({H}) must be divisible by patch size ({self.patch_size[0]}).") _assert(W % self.patch_size[1] == 0, f"Input image width ({W}) must be divisible by patch size ({self.patch_size[1]}).") x = self.proj(x) grid_size = x.shape[-2:] if self.flatten: x = x.flatten(2).transpose(1, 2) # NCHW -> NLC elif self.output_fmt != Format.NCHW: x = nchw_to(x, self.output_fmt) x = self.norm(x) return x, grid_size def resample_patch_embed( patch_embed, new_size: List[int], interpolation: str = 'bicubic', antialias: bool = True, verbose: bool = False, ): """Resample the weights of the patch embedding kernel to target resolution. We resample the patch embedding kernel by approximately inverting the effect of patch resizing. Code based on: https://github.com/google-research/big_vision/blob/b00544b81f8694488d5f36295aeb7972f3755ffe/big_vision/models/proj/flexi/vit.py With this resizing, we can for example load a B/8 filter into a B/16 model and, on 2x larger input image, the result will match. Args: patch_embed: original parameter to be resized. new_size (tuple(int, int): target shape (height, width)-only. interpolation (str): interpolation for resize antialias (bool): use anti-aliasing filter in resize verbose (bool): log operation Returns: Resized patch embedding kernel. """ import numpy as np try: import functorch vmap = functorch.vmap except ImportError: if hasattr(torch, 'vmap'): vmap = torch.vmap else: assert False, "functorch or a version of torch with vmap is required for FlexiViT resizing." assert len(patch_embed.shape) == 4, "Four dimensions expected" assert len(new_size) == 2, "New shape should only be hw" old_size = patch_embed.shape[-2:] if tuple(old_size) == tuple(new_size): return patch_embed if verbose: _logger.info(f"Resize patch embedding {patch_embed.shape} to {new_size}, w/ {interpolation} interpolation.") def resize(x_np, _new_size): x_tf = torch.Tensor(x_np)[None, None, ...] x_upsampled = F.interpolate( x_tf, size=_new_size, mode=interpolation, antialias=antialias)[0, 0, ...].numpy() return x_upsampled def get_resize_mat(_old_size, _new_size): mat = [] for i in range(np.prod(_old_size)): basis_vec = np.zeros(_old_size) basis_vec[np.unravel_index(i, _old_size)] = 1. 
mat.append(resize(basis_vec, _new_size).reshape(-1)) return np.stack(mat).T resize_mat = get_resize_mat(old_size, new_size) resize_mat_pinv = torch.Tensor(np.linalg.pinv(resize_mat.T)) def resample_kernel(kernel): resampled_kernel = resize_mat_pinv @ kernel.reshape(-1) return resampled_kernel.reshape(new_size) v_resample_kernel = vmap(vmap(resample_kernel, 0, 0), 1, 1) return v_resample_kernel(patch_embed) # def divs(n, m=None): # m = m or n // 2 # if m == 1: # return [1] # if n % m == 0: # return [m] + divs(n, m - 1) # return divs(n, m - 1) # # # class FlexiPatchEmbed(nn.Module): # """ 2D Image to Patch Embedding w/ Flexible Patch sizes (FlexiViT) # FIXME WIP # """ # def __init__( # self, # img_size=240, # patch_size=16, # in_chans=3, # embed_dim=768, # base_img_size=240, # base_patch_size=32, # norm_layer=None, # flatten=True, # bias=True, # ): # super().__init__() # self.img_size = to_2tuple(img_size) # self.patch_size = to_2tuple(patch_size) # self.num_patches = 0 # # # full range for 240 = (5, 6, 8, 10, 12, 14, 15, 16, 20, 24, 30, 40, 48) # self.seqhw = (6, 8, 10, 12, 14, 15, 16, 20, 24, 30) # # self.base_img_size = to_2tuple(base_img_size) # self.base_patch_size = to_2tuple(base_patch_size) # self.base_grid_size = tuple([i // p for i, p in zip(self.base_img_size, self.base_patch_size)]) # self.base_num_patches = self.base_grid_size[0] * self.base_grid_size[1] # # self.flatten = flatten # self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=bias) # self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() # # def forward(self, x): # B, C, H, W = x.shape # # if self.patch_size == self.base_patch_size: # weight = self.proj.weight # else: # weight = resample_patch_embed(self.proj.weight, self.patch_size) # patch_size = self.patch_size # x = F.conv2d(x, weight, bias=self.proj.bias, stride=patch_size) # if self.flatten: # x = x.flatten(2).transpose(1, 2) # BCHW -> BNC # x = self.norm(x) # return x
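A sketch of PatchEmbed and the FlexiViT-style kernel resampling above. Note that resample_patch_embed needs numpy and torch.vmap (or functorch) at call time, per its implementation.

import torch
from timm.layers.patch_embed import PatchEmbed, resample_patch_embed

embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
x = torch.randn(2, 3, 224, 224)
print(embed(x).shape)          # expected: torch.Size([2, 196, 768]), i.e. 14x14 patch tokens

# Resample the 16x16 projection kernel down to 8x8 (approximately inverts patch resizing).
new_kernel = resample_patch_embed(embed.proj.weight.detach(), [8, 8])
print(new_kernel.shape)        # expected: torch.Size([768, 3, 8, 8])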
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/create_conv2d.py
""" Create Conv2d Factory Method Hacked together by / Copyright 2020 Ross Wightman """ from .mixed_conv2d import MixedConv2d from .cond_conv2d import CondConv2d from .conv2d_same import create_conv2d_pad def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): """ Select a 2d convolution implementation based on arguments Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. Used extensively by EfficientNet, MobileNetv3 and related networks. """ if isinstance(kernel_size, list): assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently if 'groups' in kwargs: groups = kwargs.pop('groups') if groups == in_channels: kwargs['depthwise'] = True else: assert groups == 1 # We're going to use only lists for defining the MixedConv2d kernel groups, # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) else: depthwise = kwargs.pop('depthwise', False) # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 groups = in_channels if depthwise else kwargs.pop('groups', 1) if 'num_experts' in kwargs and kwargs['num_experts'] > 0: m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) else: m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) return m
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/cond_conv2d.py
""" PyTorch Conditionally Parameterized Convolution (CondConv) Paper: CondConv: Conditionally Parameterized Convolutions for Efficient Inference (https://arxiv.org/abs/1904.04971) Hacked together by / Copyright 2020 Ross Wightman """ import math from functools import partial import numpy as np import torch from torch import nn as nn from torch.nn import functional as F from .helpers import to_2tuple from .conv2d_same import conv2d_same from .padding import get_padding_value def get_condconv_initializer(initializer, num_experts, expert_shape): def condconv_initializer(weight): """CondConv initializer function.""" num_params = np.prod(expert_shape) if (len(weight.shape) != 2 or weight.shape[0] != num_experts or weight.shape[1] != num_params): raise (ValueError( 'CondConv variables must have shape [num_experts, num_params]')) for i in range(num_experts): initializer(weight[i].view(expert_shape)) return condconv_initializer class CondConv2d(nn.Module): """ Conditionally Parameterized Convolution Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion: https://github.com/pytorch/pytorch/issues/17983 """ __constants__ = ['in_channels', 'out_channels', 'dynamic_padding'] def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4): super(CondConv2d, self).__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = to_2tuple(kernel_size) self.stride = to_2tuple(stride) padding_val, is_padding_dynamic = get_padding_value( padding, kernel_size, stride=stride, dilation=dilation) self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript self.padding = to_2tuple(padding_val) self.dilation = to_2tuple(dilation) self.groups = groups self.num_experts = num_experts self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size weight_num_param = 1 for wd in self.weight_shape: weight_num_param *= wd self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param)) if bias: self.bias_shape = (self.out_channels,) self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): init_weight = get_condconv_initializer( partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape) init_weight(self.weight) if self.bias is not None: fan_in = np.prod(self.weight_shape[1:]) bound = 1 / math.sqrt(fan_in) init_bias = get_condconv_initializer( partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape) init_bias(self.bias) def forward(self, x, routing_weights): B, C, H, W = x.shape weight = torch.matmul(routing_weights, self.weight) new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size weight = weight.view(new_weight_shape) bias = None if self.bias is not None: bias = torch.matmul(routing_weights, self.bias) bias = bias.view(B * self.out_channels) # move batch elements with channels so each batch element can be efficiently convolved with separate kernel # reshape instead of view to work with channels_last input x = x.reshape(1, B * C, H, W) if self.dynamic_padding: out = conv2d_same( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, 
groups=self.groups * B) else: out = F.conv2d( x, weight, bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups * B) out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1]) # Literal port (from TF definition) # x = torch.split(x, 1, 0) # weight = torch.split(weight, 1, 0) # if self.bias is not None: # bias = torch.matmul(routing_weights, self.bias) # bias = torch.split(bias, 1, 0) # else: # bias = [None] * B # out = [] # for xi, wi, bi in zip(x, weight, bias): # wi = wi.view(*self.weight_shape) # if bi is not None: # bi = bi.view(*self.bias_shape) # out.append(self.conv_fn( # xi, wi, bi, stride=self.stride, padding=self.padding, # dilation=self.dilation, groups=self.groups)) # out = torch.cat(out, 0) return out
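A minimal sketch of CondConv2d above: per-sample kernels are mixed from num_experts templates via routing weights. Here the routing weights are just a softmax over random logits standing in for a learned gating head.

import torch
from timm.layers.cond_conv2d import CondConv2d

x = torch.randn(4, 32, 28, 28)
conv = CondConv2d(32, 64, kernel_size=3, num_experts=4, bias=True)

routing = torch.randn(4, 4).softmax(dim=-1)   # (batch, num_experts), rows sum to 1
out = conv(x, routing)
print(out.shape)                              # expected: torch.Size([4, 64, 28, 28])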
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/space_to_depth.py
import torch import torch.nn as nn class SpaceToDepth(nn.Module): bs: torch.jit.Final[int] def __init__(self, block_size=4): super().__init__() assert block_size == 4 self.bs = block_size def forward(self, x): N, C, H, W = x.size() x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) x = x.view(N, C * self.bs * self.bs, H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) return x @torch.jit.script class SpaceToDepthJit: def __call__(self, x: torch.Tensor): # assuming hard-coded that block_size==4 for acceleration N, C, H, W = x.size() x = x.view(N, C, H // 4, 4, W // 4, 4) # (N, C, H//bs, bs, W//bs, bs) x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) x = x.view(N, C * 16, H // 4, W // 4) # (N, C*bs^2, H//bs, W//bs) return x class SpaceToDepthModule(nn.Module): def __init__(self, no_jit=False): super().__init__() if not no_jit: self.op = SpaceToDepthJit() else: self.op = SpaceToDepth() def forward(self, x): return self.op(x) class DepthToSpace(nn.Module): def __init__(self, block_size): super().__init__() self.bs = block_size def forward(self, x): N, C, H, W = x.size() x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W) x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs) x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs) return x
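A short sketch showing that SpaceToDepth (block_size fixed at 4) and DepthToSpace above are exact inverses.

import torch
from timm.layers.space_to_depth import SpaceToDepth, DepthToSpace

x = torch.randn(2, 3, 64, 64)
y = SpaceToDepth(block_size=4)(x)
print(y.shape)                       # expected: torch.Size([2, 48, 16, 16])

x_back = DepthToSpace(block_size=4)(y)
print(torch.equal(x_back, x))        # expected: True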
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/norm_act.py
""" Normalization + Activation Layers Provides Norm+Act fns for standard PyTorch norm layers such as * BatchNorm * GroupNorm * LayerNorm This allows swapping with alternative layers that are natively both norm + act such as * EvoNorm (evo_norm.py) * FilterResponseNorm (filter_response_norm.py) * InplaceABN (inplace_abn.py) Hacked together by / Copyright 2022 Ross Wightman """ from typing import Union, List, Optional, Any import torch from torch import nn as nn from torch.nn import functional as F from torchvision.ops.misc import FrozenBatchNorm2d from .create_act import get_act_layer from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm from .trace_utils import _assert def _create_act(act_layer, act_kwargs=None, inplace=False, apply_act=True): act_layer = get_act_layer(act_layer) # string -> nn.Module act_kwargs = act_kwargs or {} if act_layer is not None and apply_act: if inplace: act_kwargs['inplace'] = inplace act = act_layer(**act_kwargs) else: act = nn.Identity() return act class BatchNormAct2d(nn.BatchNorm2d): """BatchNorm + Activation This module performs BatchNorm + Activation in a manner that will remain backwards compatible with weights trained with separate bn, act. This is why we inherit from BN instead of composing it as a .bn member. """ def __init__( self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, device=None, dtype=None, ): try: factory_kwargs = {'device': device, 'dtype': dtype} super(BatchNormAct2d, self).__init__( num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats, **factory_kwargs, ) except TypeError: # NOTE for backwards compat with old PyTorch w/o factory device/dtype support super(BatchNormAct2d, self).__init__( num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats, ) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) def forward(self, x): # cut & paste of torch.nn.BatchNorm2d.forward impl to avoid issues with torchscript and tracing _assert(x.ndim == 4, f'expected 4D input (got {x.ndim}D input)') # exponential_average_factor is set to self.momentum # (when it is available) only so that it gets updated # in ONNX graph when this node is exported to ONNX. if self.momentum is None: exponential_average_factor = 0.0 else: exponential_average_factor = self.momentum if self.training and self.track_running_stats: # TODO: if statement only here to tell the jit to skip emitting this when it is None if self.num_batches_tracked is not None: # type: ignore[has-type] self.num_batches_tracked.add_(1) # type: ignore[has-type] if self.momentum is None: # use cumulative moving average exponential_average_factor = 1.0 / float(self.num_batches_tracked) else: # use exponential moving average exponential_average_factor = self.momentum r""" Decide whether the mini-batch stats should be used for normalization rather than the buffers. Mini-batch stats are used in training mode, and in eval mode when buffers are None. """ if self.training: bn_training = True else: bn_training = (self.running_mean is None) and (self.running_var is None) r""" Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be passed when the update should occur (i.e. 
in training mode when they are tracked), or when buffer stats are used for normalization (i.e. in eval mode when buffers are not None). """ x = F.batch_norm( x, # If buffers are not to be tracked, ensure that they won't be updated self.running_mean if not self.training or self.track_running_stats else None, self.running_var if not self.training or self.track_running_stats else None, self.weight, self.bias, bn_training, exponential_average_factor, self.eps, ) x = self.drop(x) x = self.act(x) return x class SyncBatchNormAct(nn.SyncBatchNorm): # Thanks to Selim Seferbekov (https://github.com/rwightman/pytorch-image-models/issues/1254) # This is a quick workaround to support SyncBatchNorm for timm BatchNormAct2d layers # but ONLY when used in conjunction with the timm conversion function below. # Do not create this module directly or use the PyTorch conversion function. def forward(self, x: torch.Tensor) -> torch.Tensor: x = super().forward(x) # SyncBN doesn't work with torchscript anyways, so this is fine if hasattr(self, "drop"): x = self.drop(x) if hasattr(self, "act"): x = self.act(x) return x def convert_sync_batchnorm(module, process_group=None): # convert both BatchNorm and BatchNormAct layers to Synchronized variants module_output = module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): if isinstance(module, BatchNormAct2d): # convert timm norm + act layer module_output = SyncBatchNormAct( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, process_group=process_group, ) # set act and drop attr from the original module module_output.act = module.act module_output.drop = module.drop else: # convert standard BatchNorm layers module_output = torch.nn.SyncBatchNorm( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, process_group, ) if module.affine: with torch.no_grad(): module_output.weight = module.weight module_output.bias = module.bias module_output.running_mean = module.running_mean module_output.running_var = module.running_var module_output.num_batches_tracked = module.num_batches_tracked if hasattr(module, "qconfig"): module_output.qconfig = module.qconfig for name, child in module.named_children(): module_output.add_module(name, convert_sync_batchnorm(child, process_group)) del module return module_output class FrozenBatchNormAct2d(torch.nn.Module): """ BatchNormAct2d where the batch statistics and the affine parameters are fixed Args: num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)`` eps (float): a value added to the denominator for numerical stability. 
Default: 1e-5 """ def __init__( self, num_features: int, eps: float = 1e-5, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, ): super().__init__() self.eps = eps self.register_buffer("weight", torch.ones(num_features)) self.register_buffer("bias", torch.zeros(num_features)) self.register_buffer("running_mean", torch.zeros(num_features)) self.register_buffer("running_var", torch.ones(num_features)) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) def _load_from_state_dict( self, state_dict: dict, prefix: str, local_metadata: dict, strict: bool, missing_keys: List[str], unexpected_keys: List[str], error_msgs: List[str], ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x: torch.Tensor) -> torch.Tensor: # move reshapes to the beginning # to make it fuser-friendly w = self.weight.reshape(1, -1, 1, 1) b = self.bias.reshape(1, -1, 1, 1) rv = self.running_var.reshape(1, -1, 1, 1) rm = self.running_mean.reshape(1, -1, 1, 1) scale = w * (rv + self.eps).rsqrt() bias = b - rm * scale x = x * scale + bias x = self.act(self.drop(x)) return x def __repr__(self) -> str: return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps}, act={self.act})" def freeze_batch_norm_2d(module): """ Converts all `BatchNorm2d` and `SyncBatchNorm` or `BatchNormAct2d` and `SyncBatchNormAct2d` layers of provided module into `FrozenBatchNorm2d` or `FrozenBatchNormAct2d` respectively. Args: module (torch.nn.Module): Any PyTorch module. Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 """ res = module if isinstance(module, (BatchNormAct2d, SyncBatchNormAct)): res = FrozenBatchNormAct2d(module.num_features) res.num_features = module.num_features res.affine = module.affine if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps res.drop = module.drop res.act = module.act elif isinstance(module, (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)): res = FrozenBatchNorm2d(module.num_features) res.num_features = module.num_features res.affine = module.affine if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for name, child in module.named_children(): new_child = freeze_batch_norm_2d(child) if new_child is not child: res.add_module(name, new_child) return res def unfreeze_batch_norm_2d(module): """ Converts all `FrozenBatchNorm2d` layers of provided module into `BatchNorm2d`. If `module` is itself and instance of `FrozenBatchNorm2d`, it is converted into `BatchNorm2d` and returned. Otherwise, the module is walked recursively and submodules are converted in place. Args: module (torch.nn.Module): Any PyTorch module. 
Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 """ res = module if isinstance(module, FrozenBatchNormAct2d): res = BatchNormAct2d(module.num_features) if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps res.drop = module.drop res.act = module.act elif isinstance(module, FrozenBatchNorm2d): res = torch.nn.BatchNorm2d(module.num_features) if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for name, child in module.named_children(): new_child = unfreeze_batch_norm_2d(child) if new_child is not child: res.add_module(name, new_child) return res def _num_groups(num_channels, num_groups, group_size): if group_size: assert num_channels % group_size == 0 return num_channels // group_size return num_groups class GroupNormAct(nn.GroupNorm): # NOTE num_channel and num_groups order flipped for easier layer swaps / binding of fixed args def __init__( self, num_channels, num_groups=32, eps=1e-5, affine=True, group_size=None, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, ): super(GroupNormAct, self).__init__( _num_groups(num_channels, num_groups, group_size), num_channels, eps=eps, affine=affine, ) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): if self._fast_norm: x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) x = self.drop(x) x = self.act(x) return x class GroupNorm1Act(nn.GroupNorm): def __init__( self, num_channels, eps=1e-5, affine=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, ): super(GroupNorm1Act, self).__init__(1, num_channels, eps=eps, affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): if self._fast_norm: x = fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: x = F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) x = self.drop(x) x = self.act(x) return x class LayerNormAct(nn.LayerNorm): def __init__( self, normalization_shape: Union[int, List[int], torch.Size], eps=1e-5, affine=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, ): super(LayerNormAct, self).__init__(normalization_shape, eps=eps, elementwise_affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() act_layer = get_act_layer(act_layer) # string -> nn.Module self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = self.drop(x) x 
= self.act(x) return x class LayerNormAct2d(nn.LayerNorm): def __init__( self, num_channels, eps=1e-5, affine=True, apply_act=True, act_layer=nn.ReLU, act_kwargs=None, inplace=True, drop_layer=None, ): super(LayerNormAct2d, self).__init__(num_channels, eps=eps, elementwise_affine=affine) self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act = _create_act(act_layer, act_kwargs=act_kwargs, inplace=inplace, apply_act=apply_act) self._fast_norm = is_fast_norm() def forward(self, x): x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.permute(0, 3, 1, 2) x = self.drop(x) x = self.act(x) return x
0
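A minimal usage sketch, not part of the original file, showing how the freeze/unfreeze helpers defined above might be applied. BatchNormAct2d's constructor is not shown in this excerpt, so treating num_features as its first positional argument (like the other NormAct layers here) is an assumption.

import torch
import torch.nn as nn
from timm.layers.norm_act import BatchNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d

stem = nn.Sequential(
    nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False),
    BatchNormAct2d(32, act_layer=nn.ReLU),   # fused norm + act, assumed signature
)
frozen = freeze_batch_norm_2d(stem)          # BatchNormAct2d child -> FrozenBatchNormAct2d (stats fixed)
out = frozen(torch.randn(2, 3, 64, 64))      # forward still applies scale/bias and the activation
thawed = unfreeze_batch_norm_2d(frozen)      # converts back to a trainable BatchNormAct2d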
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/pos_embed_rel.py
""" Relative position embedding modules and functions Hacked together by / Copyright 2022 Ross Wightman """ import math from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from .mlp import Mlp from .weight_init import trunc_normal_ def gen_relative_position_index( q_size: Tuple[int, int], k_size: Optional[Tuple[int, int]] = None, class_token: bool = False, ) -> torch.Tensor: # Adapted with significant modifications from Swin / BeiT codebases # get pair-wise relative position index for each token inside the window if k_size is None: coords = torch.stack( torch.meshgrid([ torch.arange(q_size[0]), torch.arange(q_size[1]) ]) ).flatten(1) # 2, Wh, Ww relative_coords = coords[:, :, None] - coords[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0) # Qh*Qw, Kh*Kw, 2 num_relative_distance = (2 * q_size[0] - 1) * (2 * q_size[1] - 1) + 3 else: # FIXME different q vs k sizes is a WIP, need to better offset the two grids? q_coords = torch.stack( torch.meshgrid([ torch.arange(q_size[0]), torch.arange(q_size[1]) ]) ).flatten(1) # 2, Wh, Ww k_coords = torch.stack( torch.meshgrid([ torch.arange(k_size[0]), torch.arange(k_size[1]) ]) ).flatten(1) relative_coords = q_coords[:, :, None] - k_coords[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0) # Qh*Qw, Kh*Kw, 2 # relative_coords[:, :, 0] += max(q_size[0], k_size[0]) - 1 # shift to start from 0 # relative_coords[:, :, 1] += max(q_size[1], k_size[1]) - 1 # relative_coords[:, :, 0] *= k_size[1] + q_size[1] - 1 # relative_position_index = relative_coords.sum(-1) # Qh*Qw, Kh*Kw num_relative_distance = (q_size[0] + k_size[0] - 1) * (q_size[1] + q_size[1] - 1) + 3 _, relative_position_index = torch.unique(relative_coords.view(-1, 2), return_inverse=True, dim=0) if class_token: # handle cls to token & token 2 cls & cls to cls as per beit for rel pos bias # NOTE not intended or tested with MLP log-coords relative_position_index = F.pad(relative_position_index, [1, 0, 1, 0]) relative_position_index[0, 0:] = num_relative_distance - 3 relative_position_index[0:, 0] = num_relative_distance - 2 relative_position_index[0, 0] = num_relative_distance - 1 return relative_position_index.contiguous() class RelPosBias(nn.Module): """ Relative Position Bias Adapted from Swin-V1 relative position bias impl, modularized. 
""" def __init__(self, window_size, num_heads, prefix_tokens=0): super().__init__() assert prefix_tokens <= 1 self.window_size = window_size self.window_area = window_size[0] * window_size[1] self.bias_shape = (self.window_area + prefix_tokens,) * 2 + (num_heads,) num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 * prefix_tokens self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads)) self.register_buffer( "relative_position_index", gen_relative_position_index(self.window_size, class_token=prefix_tokens > 0).view(-1), persistent=False, ) self.init_weights() def init_weights(self): trunc_normal_(self.relative_position_bias_table, std=.02) def get_bias(self) -> torch.Tensor: relative_position_bias = self.relative_position_bias_table[self.relative_position_index] # win_h * win_w, win_h * win_w, num_heads relative_position_bias = relative_position_bias.view(self.bias_shape).permute(2, 0, 1) return relative_position_bias.unsqueeze(0).contiguous() def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): return attn + self.get_bias() def gen_relative_log_coords( win_size: Tuple[int, int], pretrained_win_size: Tuple[int, int] = (0, 0), mode='swin', ): assert mode in ('swin', 'cr') # as per official swin-v2 impl, supporting timm specific 'cr' log coords as well relative_coords_h = torch.arange(-(win_size[0] - 1), win_size[0], dtype=torch.float32) relative_coords_w = torch.arange(-(win_size[1] - 1), win_size[1], dtype=torch.float32) relative_coords_table = torch.stack(torch.meshgrid([relative_coords_h, relative_coords_w])) relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous() # 2*Wh-1, 2*Ww-1, 2 if mode == 'swin': if pretrained_win_size[0] > 0: relative_coords_table[:, :, 0] /= (pretrained_win_size[0] - 1) relative_coords_table[:, :, 1] /= (pretrained_win_size[1] - 1) else: relative_coords_table[:, :, 0] /= (win_size[0] - 1) relative_coords_table[:, :, 1] /= (win_size[1] - 1) relative_coords_table *= 8 # normalize to -8, 8 relative_coords_table = torch.sign(relative_coords_table) * torch.log2( 1.0 + relative_coords_table.abs()) / math.log2(8) else: # mode == 'cr' relative_coords_table = torch.sign(relative_coords_table) * torch.log( 1.0 + relative_coords_table.abs()) return relative_coords_table class RelPosMlp(nn.Module): """ Log-Coordinate Relative Position MLP Based on ideas presented in Swin-V2 paper (https://arxiv.org/abs/2111.09883) This impl covers the 'swin' implementation as well as two timm specific modes ('cr', and 'rw') """ def __init__( self, window_size, num_heads=8, hidden_dim=128, prefix_tokens=0, mode='cr', pretrained_window_size=(0, 0) ): super().__init__() self.window_size = window_size self.window_area = self.window_size[0] * self.window_size[1] self.prefix_tokens = prefix_tokens self.num_heads = num_heads self.bias_shape = (self.window_area,) * 2 + (num_heads,) if mode == 'swin': self.bias_act = nn.Sigmoid() self.bias_gain = 16 mlp_bias = (True, False) else: self.bias_act = nn.Identity() self.bias_gain = None mlp_bias = True self.mlp = Mlp( 2, # x, y hidden_features=hidden_dim, out_features=num_heads, act_layer=nn.ReLU, bias=mlp_bias, drop=(0.125, 0.) 
) self.register_buffer( "relative_position_index", gen_relative_position_index(window_size).view(-1), persistent=False) # get relative_coords_table self.register_buffer( "rel_coords_log", gen_relative_log_coords(window_size, pretrained_window_size, mode=mode), persistent=False) def get_bias(self) -> torch.Tensor: relative_position_bias = self.mlp(self.rel_coords_log) if self.relative_position_index is not None: relative_position_bias = relative_position_bias.view(-1, self.num_heads)[self.relative_position_index] relative_position_bias = relative_position_bias.view(self.bias_shape) relative_position_bias = relative_position_bias.permute(2, 0, 1) relative_position_bias = self.bias_act(relative_position_bias) if self.bias_gain is not None: relative_position_bias = self.bias_gain * relative_position_bias if self.prefix_tokens: relative_position_bias = F.pad(relative_position_bias, [self.prefix_tokens, 0, self.prefix_tokens, 0]) return relative_position_bias.unsqueeze(0).contiguous() def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): return attn + self.get_bias() def generate_lookup_tensor( length: int, max_relative_position: Optional[int] = None, ): """Generate a one_hot lookup tensor to reindex embeddings along one dimension. Args: length: the length to reindex to. max_relative_position: the maximum relative position to consider. Relative position embeddings for distances above this threshold are zeroed out. Returns: a lookup Tensor of size [length, length, vocab_size] that satisfies ret[n,m,v] = 1{m - n + max_relative_position = v}. """ if max_relative_position is None: max_relative_position = length - 1 # Return the cached lookup tensor, otherwise compute it and cache it. vocab_size = 2 * max_relative_position + 1 ret = torch.zeros(length, length, vocab_size) for i in range(length): for x in range(length): v = x - i + max_relative_position if abs(x - i) > max_relative_position: continue ret[i, x, v] = 1 return ret def reindex_2d_einsum_lookup( relative_position_tensor, height: int, width: int, height_lookup: torch.Tensor, width_lookup: torch.Tensor, ) -> torch.Tensor: """Reindex 2d relative position bias with 2 independent einsum lookups. Adapted from: https://github.com/google-research/maxvit/blob/2e06a7f1f70c76e64cd3dabe5cd1b8c1a23c9fb7/maxvit/models/attention_utils.py Args: relative_position_tensor: tensor of shape [..., vocab_height, vocab_width, ...]. height: height to reindex to. width: width to reindex to. height_lookup: one-hot height lookup width_lookup: one-hot width lookup Returns: reindexed_tensor: a Tensor of shape [..., height * width, height * width, ...] 
""" reindexed_tensor = torch.einsum('nhw,ixh->nixw', relative_position_tensor, height_lookup) reindexed_tensor = torch.einsum('nixw,jyw->nijxy', reindexed_tensor, width_lookup) area = height * width return reindexed_tensor.reshape(relative_position_tensor.shape[0], area, area) class RelPosBiasTf(nn.Module): """ Relative Position Bias Impl (Compatible with Tensorflow MaxViT models) Adapted from: https://github.com/google-research/maxvit/blob/2e06a7f1f70c76e64cd3dabe5cd1b8c1a23c9fb7/maxvit/models/attention_utils.py """ def __init__(self, window_size, num_heads, prefix_tokens=0): super().__init__() assert prefix_tokens <= 1 self.window_size = window_size self.window_area = window_size[0] * window_size[1] self.num_heads = num_heads vocab_height = 2 * window_size[0] - 1 vocab_width = 2 * window_size[1] - 1 self.bias_shape = (self.num_heads, vocab_height, vocab_width) self.relative_position_bias_table = nn.Parameter(torch.zeros(self.bias_shape)) self.register_buffer('height_lookup', generate_lookup_tensor(window_size[0]), persistent=False) self.register_buffer('width_lookup', generate_lookup_tensor(window_size[1]), persistent=False) self.init_weights() def init_weights(self): nn.init.normal_(self.relative_position_bias_table, std=.02) def get_bias(self) -> torch.Tensor: # FIXME change to not use one-hot/einsum? return reindex_2d_einsum_lookup( self.relative_position_bias_table, self.window_size[0], self.window_size[1], self.height_lookup, self.width_lookup ) def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): return attn + self.get_bias()
0
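A small usage sketch, not from the original file: RelPosBias as defined above produces an additive attention bias of shape (1, num_heads, window_area, window_area) via get_bias(), and forward() simply adds it to the attention logits.

import torch
from timm.layers.pos_embed_rel import RelPosBias

rel_pos = RelPosBias(window_size=(7, 7), num_heads=4)
bias = rel_pos.get_bias()            # (1, 4, 49, 49)
attn = torch.randn(2, 4, 49, 49)     # raw attention logits for a 7x7 window
attn = rel_pos(attn)                 # bias broadcast-added over the batch dimension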
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/drop.py
""" DropBlock, DropPath PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. Papers: DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890) Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382) Code: DropBlock impl inspired by two Tensorflow impl that I liked: - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74 - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F def drop_block_2d( x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False): """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf DropBlock with an experimental gaussian noise option. This layer has been tested on a few training runs with success, but needs further validation and possibly optimization for lower runtime impact. """ B, C, H, W = x.shape total_size = W * H clipped_block_size = min(block_size, min(W, H)) # seed_drop_rate, the gamma parameter gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( (W - block_size + 1) * (H - block_size + 1)) # Forces the block to be inside the feature map. w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device)) valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \ ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) if batchwise: # one mask for whole batch, quite a bit faster uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) else: uniform_noise = torch.rand_like(x) block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype) block_mask = -F.max_pool2d( -block_mask, kernel_size=clipped_block_size, # block_size, stride=1, padding=clipped_block_size // 2) if with_noise: normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) if inplace: x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) else: x = x * block_mask + normal_noise * (1 - block_mask) else: normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype) if inplace: x.mul_(block_mask * normalize_scale) else: x = x * block_mask * normalize_scale return x def drop_block_fast_2d( x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False): """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid block mask at edges. """ B, C, H, W = x.shape total_size = W * H clipped_block_size = min(block_size, min(W, H)) gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( (W - block_size + 1) * (H - block_size + 1)) block_mask = torch.empty_like(x).bernoulli_(gamma) block_mask = F.max_pool2d( block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) if with_noise: normal_noise = torch.empty_like(x).normal_() if inplace: x.mul_(1. - block_mask).add_(normal_noise * block_mask) else: x = x * (1. 
- block_mask) + normal_noise * block_mask else: block_mask = 1 - block_mask normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype) if inplace: x.mul_(block_mask * normalize_scale) else: x = x * block_mask * normalize_scale return x class DropBlock2d(nn.Module): """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf """ def __init__( self, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False, fast: bool = True): super(DropBlock2d, self).__init__() self.drop_prob = drop_prob self.gamma_scale = gamma_scale self.block_size = block_size self.with_noise = with_noise self.inplace = inplace self.batchwise = batchwise self.fast = fast # FIXME finish comparisons of fast vs not def forward(self, x): if not self.training or not self.drop_prob: return x if self.fast: return drop_block_fast_2d( x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace) else: return drop_block_2d( x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0. or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = x.new_empty(shape).bernoulli_(keep_prob) if keep_prob > 0.0 and scale_by_keep: random_tensor.div_(keep_prob) return x * random_tensor class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True): super(DropPath, self).__init__() self.drop_prob = drop_prob self.scale_by_keep = scale_by_keep def forward(self, x): return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) def extra_repr(self): return f'drop_prob={round(self.drop_prob,3):0.3f}'
0
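A minimal sketch, added for illustration, of DropPath used as stochastic depth around a residual branch, following the pattern used throughout timm blocks.

import torch
import torch.nn as nn
from timm.layers.drop import DropPath

class ResidualMLP(nn.Module):
    def __init__(self, dim, drop_path=0.1):
        super().__init__()
        self.fc = nn.Linear(dim, dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        # per-sample path drop; only active in training mode
        return x + self.drop_path(self.fc(x))

m = ResidualMLP(64).train()
y = m(torch.randn(8, 64))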
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/helpers.py
""" Layer/Module Helpers Hacked together by / Copyright 2020 Ross Wightman """ from itertools import repeat import collections.abc # From PyTorch internals def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): return tuple(x) return tuple(repeat(x, n)) return parse to_1tuple = _ntuple(1) to_2tuple = _ntuple(2) to_3tuple = _ntuple(3) to_4tuple = _ntuple(4) to_ntuple = _ntuple def make_divisible(v, divisor=8, min_value=None, round_limit=.9): min_value = min_value or divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < round_limit * v: new_v += divisor return new_v def extend_tuple(x, n): # pdas a tuple to specified n by padding with last value if not isinstance(x, (tuple, list)): x = (x,) else: x = tuple(x) pad_n = n - len(x) if pad_n <= 0: return x[:n] return x + (x[-1],) * pad_n
0
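A quick illustration, not part of the source file, of the tuple and divisibility helpers above.

from timm.layers.helpers import to_2tuple, make_divisible, extend_tuple

assert to_2tuple(7) == (7, 7)                    # scalars are repeated
assert to_2tuple((3, 5)) == (3, 5)               # iterables pass through as tuples
assert make_divisible(25) == 24                  # rounded to the nearest multiple of 8
assert extend_tuple((1, 2), 4) == (1, 2, 2, 2)   # padded out with the last value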
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/adaptive_avgmax_pool.py
""" PyTorch selectable adaptive pooling Adaptive pooling with the ability to select the type of pooling from: * 'avg' - Average pooling * 'max' - Max pooling * 'avgmax' - Sum of average and max pooling re-scaled by 0.5 * 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim Both a functional and a nn.Module version of the pooling is provided. Hacked together by / Copyright 2020 Ross Wightman """ from typing import Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from .format import get_spatial_dim, get_channel_dim _int_tuple_2_t = Union[int, Tuple[int, int]] def adaptive_pool_feat_mult(pool_type='avg'): if pool_type.endswith('catavgmax'): return 2 else: return 1 def adaptive_avgmax_pool2d(x, output_size: _int_tuple_2_t = 1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return 0.5 * (x_avg + x_max) def adaptive_catavgmax_pool2d(x, output_size: _int_tuple_2_t = 1): x_avg = F.adaptive_avg_pool2d(x, output_size) x_max = F.adaptive_max_pool2d(x, output_size) return torch.cat((x_avg, x_max), 1) def select_adaptive_pool2d(x, pool_type='avg', output_size: _int_tuple_2_t = 1): """Selectable global pooling function with dynamic input kernel size """ if pool_type == 'avg': x = F.adaptive_avg_pool2d(x, output_size) elif pool_type == 'avgmax': x = adaptive_avgmax_pool2d(x, output_size) elif pool_type == 'catavgmax': x = adaptive_catavgmax_pool2d(x, output_size) elif pool_type == 'max': x = F.adaptive_max_pool2d(x, output_size) else: assert False, 'Invalid pool type: %s' % pool_type return x class FastAdaptiveAvgPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: F = 'NCHW'): super(FastAdaptiveAvgPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.mean(self.dim, keepdim=not self.flatten) class FastAdaptiveMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): return x.amax(self.dim, keepdim=not self.flatten) class FastAdaptiveAvgMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveAvgMaxPool, self).__init__() self.flatten = flatten self.dim = get_spatial_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim, keepdim=not self.flatten) x_max = x.amax(self.dim, keepdim=not self.flatten) return 0.5 * x_avg + 0.5 * x_max class FastAdaptiveCatAvgMaxPool(nn.Module): def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'): super(FastAdaptiveCatAvgMaxPool, self).__init__() self.flatten = flatten self.dim_reduce = get_spatial_dim(input_fmt) if flatten: self.dim_cat = 1 else: self.dim_cat = get_channel_dim(input_fmt) def forward(self, x): x_avg = x.mean(self.dim_reduce, keepdim=not self.flatten) x_max = x.amax(self.dim_reduce, keepdim=not self.flatten) return torch.cat((x_avg, x_max), self.dim_cat) class AdaptiveAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t = 1): super(AdaptiveAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_avgmax_pool2d(x, self.output_size) class AdaptiveCatAvgMaxPool2d(nn.Module): def __init__(self, output_size: _int_tuple_2_t = 1): super(AdaptiveCatAvgMaxPool2d, self).__init__() self.output_size = output_size def forward(self, x): return adaptive_catavgmax_pool2d(x, self.output_size) 
class SelectAdaptivePool2d(nn.Module): """Selectable global pooling layer with dynamic input kernel size """ def __init__( self, output_size: _int_tuple_2_t = 1, pool_type: str = 'fast', flatten: bool = False, input_fmt: str = 'NCHW', ): super(SelectAdaptivePool2d, self).__init__() assert input_fmt in ('NCHW', 'NHWC') self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing if not pool_type: self.pool = nn.Identity() # pass through self.flatten = nn.Flatten(1) if flatten else nn.Identity() elif pool_type.startswith('fast') or input_fmt != 'NCHW': assert output_size == 1, 'Fast pooling and non NCHW input formats require output_size == 1.' if pool_type.endswith('avgmax'): self.pool = FastAdaptiveAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('catavgmax'): self.pool = FastAdaptiveCatAvgMaxPool(flatten, input_fmt=input_fmt) elif pool_type.endswith('max'): self.pool = FastAdaptiveMaxPool(flatten, input_fmt=input_fmt) else: self.pool = FastAdaptiveAvgPool(flatten, input_fmt=input_fmt) self.flatten = nn.Identity() else: assert input_fmt == 'NCHW' if pool_type == 'avgmax': self.pool = AdaptiveAvgMaxPool2d(output_size) elif pool_type == 'catavgmax': self.pool = AdaptiveCatAvgMaxPool2d(output_size) elif pool_type == 'max': self.pool = nn.AdaptiveMaxPool2d(output_size) else: self.pool = nn.AdaptiveAvgPool2d(output_size) self.flatten = nn.Flatten(1) if flatten else nn.Identity() def is_identity(self): return not self.pool_type def forward(self, x): x = self.pool(x) x = self.flatten(x) return x def feat_mult(self): return adaptive_pool_feat_mult(self.pool_type) def __repr__(self): return self.__class__.__name__ + ' (' \ + 'pool_type=' + self.pool_type \ + ', flatten=' + str(self.flatten) + ')'
0
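A short sketch, added here for illustration, of SelectAdaptivePool2d as a classifier-head pool; feat_mult() reports the feature-dim multiplier needed to size the classifier.

import torch
from timm.layers.adaptive_avgmax_pool import SelectAdaptivePool2d

feat = torch.randn(2, 512, 7, 7)
pool = SelectAdaptivePool2d(pool_type='avg', flatten=True)
print(pool(feat).shape)                       # torch.Size([2, 512])

# 'catavgmax' concatenates avg and max pooling, doubling the feature dim
pool = SelectAdaptivePool2d(pool_type='catavgmax', flatten=True)
print(pool(feat).shape, pool.feat_mult())     # torch.Size([2, 1024]) 2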
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/filter_response_norm.py
""" Filter Response Norm in PyTorch Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737 Hacked together by / Copyright 2021 Ross Wightman """ import torch import torch.nn as nn from .create_act import create_act_layer from .trace_utils import _assert def inv_instance_rms(x, eps: float = 1e-5): rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype) return rms.expand(x.shape) class FilterResponseNormTlu2d(nn.Module): def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_): super(FilterResponseNormTlu2d, self).__init__() self.apply_act = apply_act # apply activation (non-linearity) self.rms = rms self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.tau is not None: nn.init.zeros_(self.tau) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = x * inv_instance_rms(x, self.eps) x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x class FilterResponseNormAct2d(nn.Module): def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_): super(FilterResponseNormAct2d, self).__init__() if act_layer is not None and apply_act: self.act = create_act_layer(act_layer, inplace=inplace) else: self.act = nn.Identity() self.rms = rms self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = x * inv_instance_rms(x, self.eps) x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) return self.act(x)
0
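A minimal usage sketch, not from the original file: Filter Response Norm plus activation as a drop-in norm-act block for 4D NCHW features.

import torch
import torch.nn as nn
from timm.layers.filter_response_norm import FilterResponseNormAct2d

frn = FilterResponseNormAct2d(64, act_layer=nn.ReLU)
y = frn(torch.randn(2, 64, 8, 8))   # expects 4D NCHW input, returns the same shape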
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/lambda_layer.py
""" Lambda Layer Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` - https://arxiv.org/abs/2102.08602 @misc{2102.08602, Author = {Irwan Bello}, Title = {LambdaNetworks: Modeling Long-Range Interactions Without Attention}, Year = {2021}, } Status: This impl is a WIP. Code snippets in the paper were used as reference but good chance some details are missing/wrong. I've only implemented local lambda conv based pos embeddings. For a PyTorch impl that includes other embedding options checkout https://github.com/lucidrains/lambda-networks Hacked together by / Copyright 2021 Ross Wightman """ import torch from torch import nn import torch.nn.functional as F from .helpers import to_2tuple, make_divisible from .weight_init import trunc_normal_ def rel_pos_indices(size): size = to_2tuple(size) pos = torch.stack(torch.meshgrid(torch.arange(size[0]), torch.arange(size[1]))).flatten(1) rel_pos = pos[:, None, :] - pos[:, :, None] rel_pos[0] += size[0] - 1 rel_pos[1] += size[1] - 1 return rel_pos # 2, H * W, H * W class LambdaLayer(nn.Module): """Lambda Layer Paper: `LambdaNetworks: Modeling Long-Range Interactions Without Attention` - https://arxiv.org/abs/2102.08602 NOTE: intra-depth parameter 'u' is fixed at 1. It did not appear worth the complexity to add. The internal dimensions of the lambda module are controlled via the interaction of several arguments. * the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query (q) and key (k) dimension are determined by * dim_head = (dim_out * attn_ratio // num_heads) if dim_head is None * q = num_heads * dim_head, k = dim_head * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not set Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set feat_size (Tuple[int, int]): size of input feature_map for relative pos variant H, W stride (int): output stride of the module, avg pool used if stride == 2 num_heads (int): parallel attention heads. dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set r (int): local lambda convolution radius. Use lambda conv if set, else relative pos if not. (default: 9) qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. 
(default: 1.0) qkv_bias (bool): add bias to q, k, and v projections """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=16, r=9, qk_ratio=1.0, qkv_bias=False): super().__init__() dim_out = dim_out or dim assert dim_out % num_heads == 0, ' should be divided by num_heads' self.dim_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.num_heads = num_heads self.dim_v = dim_out // num_heads self.qkv = nn.Conv2d( dim, num_heads * self.dim_qk + self.dim_qk + self.dim_v, kernel_size=1, bias=qkv_bias) self.norm_q = nn.BatchNorm2d(num_heads * self.dim_qk) self.norm_v = nn.BatchNorm2d(self.dim_v) if r is not None: # local lambda convolution for pos self.conv_lambda = nn.Conv3d(1, self.dim_qk, (r, r, 1), padding=(r // 2, r // 2, 0)) self.pos_emb = None self.rel_pos_indices = None else: # relative pos embedding assert feat_size is not None feat_size = to_2tuple(feat_size) rel_size = [2 * s - 1 for s in feat_size] self.conv_lambda = None self.pos_emb = nn.Parameter(torch.zeros(rel_size[0], rel_size[1], self.dim_qk)) self.register_buffer('rel_pos_indices', rel_pos_indices(feat_size), persistent=False) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in if self.conv_lambda is not None: trunc_normal_(self.conv_lambda.weight, std=self.dim_qk ** -0.5) if self.pos_emb is not None: trunc_normal_(self.pos_emb, std=.02) def forward(self, x): B, C, H, W = x.shape M = H * W qkv = self.qkv(x) q, k, v = torch.split(qkv, [ self.num_heads * self.dim_qk, self.dim_qk, self.dim_v], dim=1) q = self.norm_q(q).reshape(B, self.num_heads, self.dim_qk, M).transpose(-1, -2) # B, num_heads, M, K v = self.norm_v(v).reshape(B, self.dim_v, M).transpose(-1, -2) # B, M, V k = F.softmax(k.reshape(B, self.dim_qk, M), dim=-1) # B, K, M content_lam = k @ v # B, K, V content_out = q @ content_lam.unsqueeze(1) # B, num_heads, M, V if self.pos_emb is None: position_lam = self.conv_lambda(v.reshape(B, 1, H, W, self.dim_v)) # B, H, W, V, K position_lam = position_lam.reshape(B, 1, self.dim_qk, H * W, self.dim_v).transpose(2, 3) # B, 1, M, K, V else: # FIXME relative pos embedding path not fully verified pos_emb = self.pos_emb[self.rel_pos_indices[0], self.rel_pos_indices[1]].expand(B, -1, -1, -1) position_lam = (pos_emb.transpose(-1, -2) @ v.unsqueeze(1)).unsqueeze(1) # B, 1, M, K, V position_out = (q.unsqueeze(-2) @ position_lam).squeeze(-2) # B, num_heads, M, V out = (content_out + position_out).transpose(-1, -2).reshape(B, C, H, W) # B, C (num_heads * V), H, W out = self.pool(out) return out
0
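A rough usage sketch, added for illustration: with the default local lambda convolution (r=9) no feat_size is required, and the output matches the input resolution when stride=1.

import torch
from timm.layers.lambda_layer import LambdaLayer

layer = LambdaLayer(dim=128, num_heads=4, dim_head=16, r=9)
y = layer(torch.randn(2, 128, 32, 32))   # -> torch.Size([2, 128, 32, 32])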
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/selective_kernel.py
""" Selective Kernel Convolution/Attention Paper: Selective Kernel Networks (https://arxiv.org/abs/1903.06586) Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from .conv_bn_act import ConvNormActAa from .helpers import make_divisible from .trace_utils import _assert def _kernel_valid(k): if isinstance(k, (list, tuple)): for ki in k: return _kernel_valid(ki) assert k >= 3 and k % 2 class SelectiveKernelAttn(nn.Module): def __init__(self, channels, num_paths=2, attn_channels=32, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): """ Selective Kernel Attention Module Selective Kernel attention mechanism factored out into its own module. """ super(SelectiveKernelAttn, self).__init__() self.num_paths = num_paths self.fc_reduce = nn.Conv2d(channels, attn_channels, kernel_size=1, bias=False) self.bn = norm_layer(attn_channels) self.act = act_layer(inplace=True) self.fc_select = nn.Conv2d(attn_channels, channels * num_paths, kernel_size=1, bias=False) def forward(self, x): _assert(x.shape[1] == self.num_paths, '') x = x.sum(1).mean((2, 3), keepdim=True) x = self.fc_reduce(x) x = self.bn(x) x = self.act(x) x = self.fc_select(x) B, C, H, W = x.shape x = x.view(B, self.num_paths, C // self.num_paths, H, W) x = torch.softmax(x, dim=1) return x class SelectiveKernel(nn.Module): def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1, rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_layer=None): """ Selective Kernel Convolution Module As described in Selective Kernel Networks (https://arxiv.org/abs/1903.06586) with some modifications. Largest change is the input split, which divides the input channels across each convolution path, this can be viewed as a grouping of sorts, but the output channel counts expand to the module level value. This keeps the parameter count from ballooning when the convolutions themselves don't have groups, but still provides a noteworthy increase in performance over similar param count models without this attention layer. -Ross W Args: in_channels (int): module input (feature) channel count out_channels (int): module output (feature) channel count kernel_size (int, list): kernel size for each convolution branch stride (int): stride for convolutions dilation (int): dilation for module as a whole, impacts dilation of each branch groups (int): number of groups for each branch rd_ratio (int, float): reduction factor for attention features keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations split_input (bool): split input channels evenly across each convolution branch, keeps param count lower, can be viewed as grouping by path, output expands to module out_channels count act_layer (nn.Module): activation layer to use norm_layer (nn.Module): batchnorm/norm layer to use aa_layer (nn.Module): anti-aliasing module drop_layer (nn.Module): spatial drop module in convs (drop block, etc) """ super(SelectiveKernel, self).__init__() out_channels = out_channels or in_channels kernel_size = kernel_size or [3, 5] # default to one 3x3 and one 5x5 branch. 
5x5 -> 3x3 + dilation _kernel_valid(kernel_size) if not isinstance(kernel_size, list): kernel_size = [kernel_size] * 2 if keep_3x3: dilation = [dilation * (k - 1) // 2 for k in kernel_size] kernel_size = [3] * len(kernel_size) else: dilation = [dilation] * len(kernel_size) self.num_paths = len(kernel_size) self.in_channels = in_channels self.out_channels = out_channels self.split_input = split_input if self.split_input: assert in_channels % self.num_paths == 0 in_channels = in_channels // self.num_paths groups = min(out_channels, groups) conv_kwargs = dict( stride=stride, groups=groups, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_layer=drop_layer) self.paths = nn.ModuleList([ ConvNormActAa(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs) for k, d in zip(kernel_size, dilation)]) attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor) self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels) def forward(self, x): if self.split_input: x_split = torch.split(x, self.in_channels // self.num_paths, 1) x_paths = [op(x_split[i]) for i, op in enumerate(self.paths)] else: x_paths = [op(x) for op in self.paths] x = torch.stack(x_paths, dim=1) x_attn = self.attn(x) x = x * x_attn x = torch.sum(x, dim=1) return x
0
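A minimal sketch, not part of the source, of SelectiveKernel with the default 3x3/5x5 (dilated) branch pair; with split_input=True the input channels must divide evenly across the two paths.

import torch
from timm.layers.selective_kernel import SelectiveKernel

sk = SelectiveKernel(in_channels=32, out_channels=64)
y = sk(torch.randn(2, 32, 16, 16))   # stride 1, so spatial size is preserved: (2, 64, 16, 16)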
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/patch_dropout.py
from typing import Optional, Tuple, Union import torch import torch.nn as nn class PatchDropout(nn.Module): """ https://arxiv.org/abs/2212.00794 """ return_indices: torch.jit.Final[bool] def __init__( self, prob: float = 0.5, num_prefix_tokens: int = 1, ordered: bool = False, return_indices: bool = False, ): super().__init__() assert 0 <= prob < 1. self.prob = prob self.num_prefix_tokens = num_prefix_tokens # exclude CLS token (or other prefix tokens) self.ordered = ordered self.return_indices = return_indices def forward(self, x) -> Union[torch.Tensor, Tuple[torch.Tensor, Optional[torch.Tensor]]]: if not self.training or self.prob == 0.: if self.return_indices: return x, None return x if self.num_prefix_tokens: prefix_tokens, x = x[:, :self.num_prefix_tokens], x[:, self.num_prefix_tokens:] else: prefix_tokens = None B = x.shape[0] L = x.shape[1] num_keep = max(1, int(L * (1. - self.prob))) keep_indices = torch.argsort(torch.randn(B, L, device=x.device), dim=-1)[:, :num_keep] if self.ordered: # NOTE does not need to maintain patch order in typical transformer use, # but possibly useful for debug / visualization keep_indices = keep_indices.sort(dim=-1)[0] x = x.gather(1, keep_indices.unsqueeze(-1).expand((-1, -1) + x.shape[2:])) if prefix_tokens is not None: x = torch.cat((prefix_tokens, x), dim=1) if self.return_indices: return x, keep_indices return x
0
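A small sketch, added for illustration, of PatchDropout on ViT-style tokens: with prob=0.25 and one prefix (CLS) token, roughly a quarter of the 196 patch tokens are dropped during training, while eval mode is a no-op.

import torch
from timm.layers.patch_dropout import PatchDropout

drop = PatchDropout(prob=0.25, num_prefix_tokens=1).train()
tokens = torch.randn(2, 1 + 196, 768)
out = drop(tokens)                 # -> (2, 1 + 147, 768) while training
out_eval = drop.eval()(tokens)     # unchanged in eval mode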
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/bottleneck_attn.py
""" Bottleneck Self Attention (Bottleneck Transformers) Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 @misc{2101.11605, Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, Title = {Bottleneck Transformers for Visual Recognition}, Year = {2021}, } Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 This impl is a WIP but given that it is based on the ref gist likely not too far off. Hacked together by / Copyright 2021 Ross Wightman """ from typing import List import torch import torch.nn as nn import torch.nn.functional as F from .helpers import to_2tuple, make_divisible from .weight_init import trunc_normal_ from .trace_utils import _assert def rel_logits_1d(q, rel_k, permute_mask: List[int]): """ Compute relative logits along one dimension As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 Args: q: (batch, heads, height, width, dim) rel_k: (2 * width - 1, dim) permute_mask: permute output dim according to this """ B, H, W, dim = q.shape x = (q @ rel_k.transpose(-1, -2)) x = x.reshape(-1, W, 2 * W -1) # pad to shift from relative to absolute indexing x_pad = F.pad(x, [0, 1]).flatten(1) x_pad = F.pad(x_pad, [0, W - 1]) # reshape and slice out the padded elements x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) x = x_pad[:, :W, W - 1:] # reshape and tile x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) return x.permute(permute_mask) class PosEmbedRel(nn.Module): """ Relative Position Embedding As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 """ def __init__(self, feat_size, dim_head, scale): super().__init__() self.height, self.width = to_2tuple(feat_size) self.dim_head = dim_head self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) def forward(self, q): B, HW, _ = q.shape # relative logits in width dimension. q = q.reshape(B, self.height, self.width, -1) rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) # relative logits in height dimension. q = q.transpose(1, 2) rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) rel_logits = rel_logits_h + rel_logits_w rel_logits = rel_logits.reshape(B, HW, HW) return rel_logits class BottleneckAttn(nn.Module): """ Bottleneck Attention Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 The internal dimensions of the attention module are controlled by the interaction of several arguments. 
* the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query and key (qk) dimensions are determined by * num_heads * dim_head if dim_head is not None * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set stride (int): output stride of the module, avg pool used if stride == 2 (default: 1). num_heads (int): parallel attention heads (default: 4) dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) qkv_bias (bool): add bias to q, k, and v projections scale_pos_embed (bool): scale the position embedding as well as Q @ K """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): super().__init__() assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' dim_out = dim_out or dim assert dim_out % num_heads == 0 self.num_heads = num_heads self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.dim_head_v = dim_out // self.num_heads self.dim_out_qk = num_heads * self.dim_head_qk self.dim_out_v = num_heads * self.dim_head_v self.scale = self.dim_head_qk ** -0.5 self.scale_pos_embed = scale_pos_embed self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) # NOTE I'm only supporting relative pos embedding for now self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in trunc_normal_(self.pos_embed.height_rel, std=self.scale) trunc_normal_(self.pos_embed.width_rel, std=self.scale) def forward(self, x): B, C, H, W = x.shape _assert(H == self.pos_embed.height, '') _assert(W == self.pos_embed.width, '') x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted. q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) if self.scale_pos_embed: attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W else: attn = (q @ k) * self.scale + self.pos_embed(q) attn = attn.softmax(dim=-1) out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W out = self.pool(out) return out
0
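A minimal usage sketch, not from the original file: BottleneckAttn requires a concrete feat_size and preserves the input resolution when stride=1.

import torch
from timm.layers.bottleneck_attn import BottleneckAttn

attn = BottleneckAttn(dim=256, feat_size=(16, 16), num_heads=4)
y = attn(torch.randn(2, 256, 16, 16))   # -> torch.Size([2, 256, 16, 16])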
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/split_batchnorm.py
""" Split BatchNorm A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through a separate BN layer. The first split is passed through the parent BN layers with weight/bias keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' namespace. This allows easily removing the auxiliary BN layers after training to efficiently achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2, 'Disentangled Learning via An Auxiliary BN' Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn class SplitBatchNorm2d(torch.nn.BatchNorm2d): def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, num_splits=2): super().__init__(num_features, eps, momentum, affine, track_running_stats) assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)' self.num_splits = num_splits self.aux_bn = nn.ModuleList([ nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)]) def forward(self, input: torch.Tensor): if self.training: # aux BN only relevant while training split_size = input.shape[0] // self.num_splits assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits" split_input = input.split(split_size) x = [super().forward(split_input[0])] for i, a in enumerate(self.aux_bn): x.append(a(split_input[i + 1])) return torch.cat(x, dim=0) else: return super().forward(input) def convert_splitbn_model(module, num_splits=2): """ Recursively traverse module and its children to replace all instances of ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. Args: module (torch.nn.Module): input module num_splits: number of separate batchnorm layers to split input across Example:: >>> # model is an instance of torch.nn.Module >>> model = timm.models.convert_splitbn_model(model, num_splits=2) """ mod = module if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): return module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): mod = SplitBatchNorm2d( module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, num_splits=num_splits) mod.running_mean = module.running_mean mod.running_var = module.running_var mod.num_batches_tracked = module.num_batches_tracked if module.affine: mod.weight.data = module.weight.data.clone().detach() mod.bias.data = module.bias.data.clone().detach() for aux in mod.aux_bn: aux.running_mean = module.running_mean.clone() aux.running_var = module.running_var.clone() aux.num_batches_tracked = module.num_batches_tracked.clone() if module.affine: aux.weight.data = module.weight.data.clone().detach() aux.bias.data = module.bias.data.clone().detach() for name, child in module.named_children(): mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) del module return mod
0
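A short sketch, added for illustration, of the AdvProp-style auxiliary-BN conversion; during training the batch dimension must be evenly divisible by num_splits.

import torch
import torch.nn as nn
from timm.layers.split_batchnorm import convert_splitbn_model

model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU())
model = convert_splitbn_model(model, num_splits=2)
model.train()
y = model(torch.randn(8, 3, 32, 32))   # batch of 8 splits cleanly into 2 groups of 4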
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/conv2d_same.py
""" Conv2d w/ Same Padding Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F from typing import Tuple, Optional from .config import is_exportable, is_scriptable from .padding import pad_same, pad_same_arg, get_padding_value _USE_EXPORT_CONV = False def conv2d_same( x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1), padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1, ): x = pad_same(x, weight.shape[-2:], stride, dilation) return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) class Conv2dSame(nn.Conv2d): """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions """ def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, ): super(Conv2dSame, self).__init__( in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias, ) def forward(self, x): return conv2d_same( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) class Conv2dSameExport(nn.Conv2d): """ ONNX export friendly Tensorflow like 'SAME' convolution wrapper for 2D convolutions NOTE: This does not currently work with torch.jit.script """ # pylint: disable=unused-argument def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, ): super(Conv2dSameExport, self).__init__( in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias, ) self.pad = None self.pad_input_size = (0, 0) def forward(self, x): input_size = x.size()[-2:] if self.pad is None: pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation) self.pad = nn.ZeroPad2d(pad_arg) self.pad_input_size = input_size x = self.pad(x) return F.conv2d( x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, ) def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): padding = kwargs.pop('padding', '') kwargs.setdefault('bias', False) padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) if is_dynamic: if _USE_EXPORT_CONV and is_exportable(): # older PyTorch ver needed this to export same padding reasonably assert not is_scriptable() # Conv2DSameExport does not work with jit return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs) else: return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) else: return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
0
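A minimal sketch, not part of the source file: Conv2dSame pads dynamically so the output spatial size is ceil(input / stride), mirroring TF 'SAME' behaviour even for odd input sizes.

import torch
from timm.layers.conv2d_same import Conv2dSame

conv = Conv2dSame(16, 32, kernel_size=3, stride=2)
y = conv(torch.randn(1, 16, 15, 15))   # -> torch.Size([1, 32, 8, 8]), i.e. ceil(15 / 2)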
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/grn.py
""" Global Response Normalization Module Based on the GRN layer presented in `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808 This implementation * works for both NCHW and NHWC tensor layouts * uses affine param names matching existing torch norm layers * slightly improves eager mode performance via fused addcmul Hacked together by / Copyright 2023 Ross Wightman """ import torch from torch import nn as nn class GlobalResponseNorm(nn.Module): """ Global Response Normalization layer """ def __init__(self, dim, eps=1e-6, channels_last=True): super().__init__() self.eps = eps if channels_last: self.spatial_dim = (1, 2) self.channel_dim = -1 self.wb_shape = (1, 1, 1, -1) else: self.spatial_dim = (2, 3) self.channel_dim = 1 self.wb_shape = (1, -1, 1, 1) self.weight = nn.Parameter(torch.zeros(dim)) self.bias = nn.Parameter(torch.zeros(dim)) def forward(self, x): x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True) x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps) return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n)
0
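A quick sketch, added for illustration: GRN in channels-last (NHWC) layout as used in ConvNeXt-V2 style MLP blocks; with channels_last=False it expects NCHW instead.

import torch
from timm.layers.grn import GlobalResponseNorm

grn = GlobalResponseNorm(dim=96, channels_last=True)
y = grn(torch.randn(2, 14, 14, 96))   # NHWC in, NHWC out, same shape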
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/inplace_abn.py
import torch from torch import nn as nn try: from inplace_abn.functions import inplace_abn, inplace_abn_sync has_iabn = True except ImportError: has_iabn = False def inplace_abn(x, weight, bias, running_mean, running_var, training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01): raise ImportError( "Please install InplaceABN:'pip install git+https://github.com/mapillary/inplace_abn.git@v1.0.12'") def inplace_abn_sync(**kwargs): inplace_abn(**kwargs) class InplaceAbn(nn.Module): """Activated Batch Normalization This gathers a BatchNorm and an activation function in a single module Parameters ---------- num_features : int Number of feature channels in the input and output. eps : float Small constant to prevent numerical issues. momentum : float Momentum factor applied to compute running statistics. affine : bool If `True` apply learned scale and shift transformation after normalization. act_layer : str or nn.Module type Name or type of the activation functions, one of: `leaky_relu`, `elu` act_param : float Negative slope for the `leaky_relu` activation. """ def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, act_layer="leaky_relu", act_param=0.01, drop_layer=None): super(InplaceAbn, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps self.momentum = momentum if apply_act: if isinstance(act_layer, str): assert act_layer in ('leaky_relu', 'elu', 'identity', '') self.act_name = act_layer if act_layer else 'identity' else: # convert act layer passed as type to string if act_layer == nn.ELU: self.act_name = 'elu' elif act_layer == nn.LeakyReLU: self.act_name = 'leaky_relu' elif act_layer is None or act_layer == nn.Identity: self.act_name = 'identity' else: assert False, f'Invalid act layer {act_layer.__name__} for IABN' else: self.act_name = 'identity' self.act_param = act_param if self.affine: self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.running_mean, 0) nn.init.constant_(self.running_var, 1) if self.affine: nn.init.constant_(self.weight, 1) nn.init.constant_(self.bias, 0) def forward(self, x): output = inplace_abn( x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.act_name, self.act_param) if isinstance(output, tuple): output = output[0] return output
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/create_norm_act.py
""" NormAct (Normalizaiton + Activation Layer) Factory Create norm + act combo modules that attempt to be backwards compatible with separate norm + act isntances in models. Where these are used it will be possible to swap separate BN + act layers with combined modules like IABN or EvoNorms. Hacked together by / Copyright 2020 Ross Wightman """ import types import functools from .evo_norm import * from .filter_response_norm import FilterResponseNormAct2d, FilterResponseNormTlu2d from .norm_act import BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d from .inplace_abn import InplaceAbn _NORM_ACT_MAP = dict( batchnorm=BatchNormAct2d, batchnorm2d=BatchNormAct2d, groupnorm=GroupNormAct, groupnorm1=functools.partial(GroupNormAct, num_groups=1), layernorm=LayerNormAct, layernorm2d=LayerNormAct2d, evonormb0=EvoNorm2dB0, evonormb1=EvoNorm2dB1, evonormb2=EvoNorm2dB2, evonorms0=EvoNorm2dS0, evonorms0a=EvoNorm2dS0a, evonorms1=EvoNorm2dS1, evonorms1a=EvoNorm2dS1a, evonorms2=EvoNorm2dS2, evonorms2a=EvoNorm2dS2a, frn=FilterResponseNormAct2d, frntlu=FilterResponseNormTlu2d, inplaceabn=InplaceAbn, iabn=InplaceAbn, ) _NORM_ACT_TYPES = {m for n, m in _NORM_ACT_MAP.items()} # has act_layer arg to define act type _NORM_ACT_REQUIRES_ARG = { BatchNormAct2d, GroupNormAct, LayerNormAct, LayerNormAct2d, FilterResponseNormAct2d, InplaceAbn} def create_norm_act_layer(layer_name, num_features, act_layer=None, apply_act=True, jit=False, **kwargs): layer = get_norm_act_layer(layer_name, act_layer=act_layer) layer_instance = layer(num_features, apply_act=apply_act, **kwargs) if jit: layer_instance = torch.jit.script(layer_instance) return layer_instance def get_norm_act_layer(norm_layer, act_layer=None): assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) assert act_layer is None or isinstance(act_layer, (type, str, types.FunctionType, functools.partial)) norm_act_kwargs = {} # unbind partial fn, so args can be rebound later if isinstance(norm_layer, functools.partial): norm_act_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): layer_name = norm_layer.replace('_', '').lower().split('-')[0] norm_act_layer = _NORM_ACT_MAP.get(layer_name, None) elif norm_layer in _NORM_ACT_TYPES: norm_act_layer = norm_layer elif isinstance(norm_layer, types.FunctionType): # if function type, must be a lambda/fn that creates a norm_act layer norm_act_layer = norm_layer else: type_name = norm_layer.__name__.lower() if type_name.startswith('batchnorm'): norm_act_layer = BatchNormAct2d elif type_name.startswith('groupnorm'): norm_act_layer = GroupNormAct elif type_name.startswith('groupnorm1'): norm_act_layer = functools.partial(GroupNormAct, num_groups=1) elif type_name.startswith('layernorm2d'): norm_act_layer = LayerNormAct2d elif type_name.startswith('layernorm'): norm_act_layer = LayerNormAct else: assert False, f"No equivalent norm_act layer for {type_name}" if norm_act_layer in _NORM_ACT_REQUIRES_ARG: # pass `act_layer` through for backwards compat where `act_layer=None` implies no activation. # In the future, may force use of `apply_act` with `act_layer` arg bound to relevant NormAct types norm_act_kwargs.setdefault('act_layer', act_layer) if norm_act_kwargs: norm_act_layer = functools.partial(norm_act_layer, **norm_act_kwargs) # bind/rebind args return norm_act_layer
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/format.py
from enum import Enum from typing import Union import torch class Format(str, Enum): NCHW = 'NCHW' NHWC = 'NHWC' NCL = 'NCL' NLC = 'NLC' FormatT = Union[str, Format] def get_spatial_dim(fmt: FormatT): fmt = Format(fmt) if fmt is Format.NLC: dim = (1,) elif fmt is Format.NCL: dim = (2,) elif fmt is Format.NHWC: dim = (1, 2) else: dim = (2, 3) return dim def get_channel_dim(fmt: FormatT): fmt = Format(fmt) if fmt is Format.NHWC: dim = 3 elif fmt is Format.NLC: dim = 2 else: dim = 1 return dim def nchw_to(x: torch.Tensor, fmt: Format): if fmt == Format.NHWC: x = x.permute(0, 2, 3, 1) elif fmt == Format.NLC: x = x.flatten(2).transpose(1, 2) elif fmt == Format.NCL: x = x.flatten(2) return x def nhwc_to(x: torch.Tensor, fmt: Format): if fmt == Format.NCHW: x = x.permute(0, 3, 1, 2) elif fmt == Format.NLC: x = x.flatten(1, 2) elif fmt == Format.NCL: x = x.flatten(1, 2).transpose(1, 2) return x
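# Small illustrative example for the helpers above (not part of the original file), assuming the
# module import path `timm.layers.format` matches the file path shown above.
import torch
from timm.layers.format import Format, get_channel_dim, get_spatial_dim, nchw_to

x = torch.randn(2, 3, 4, 5)                   # NCHW tensor
print(get_channel_dim('NHWC'))                # 3
print(get_spatial_dim(Format.NLC))            # (1,)
print(nchw_to(x, Format.NHWC).shape)          # torch.Size([2, 4, 5, 3])
print(nchw_to(x, Format.NLC).shape)           # torch.Size([2, 20, 3])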
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/halo_attn.py
""" Halo Self Attention Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` - https://arxiv.org/abs/2103.12731 @misc{2103.12731, Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and Jonathon Shlens}, Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, Year = {2021}, } Status: This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. The attention mechanism works but it's slow as implemented. Hacked together by / Copyright 2021 Ross Wightman """ from typing import List import torch from torch import nn import torch.nn.functional as F from .helpers import make_divisible from .weight_init import trunc_normal_ from .trace_utils import _assert def rel_logits_1d(q, rel_k, permute_mask: List[int]): """ Compute relative logits along one dimension As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 Args: q: (batch, height, width, dim) rel_k: (2 * window - 1, dim) permute_mask: permute output dim according to this """ B, H, W, dim = q.shape rel_size = rel_k.shape[0] win_size = (rel_size + 1) // 2 x = (q @ rel_k.transpose(-1, -2)) x = x.reshape(-1, W, rel_size) # pad to shift from relative to absolute indexing x_pad = F.pad(x, [0, 1]).flatten(1) x_pad = F.pad(x_pad, [0, rel_size - W]) # reshape and slice out the padded elements x_pad = x_pad.reshape(-1, W + 1, rel_size) x = x_pad[:, :W, win_size - 1:] # reshape and tile x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) return x.permute(permute_mask) class PosEmbedRel(nn.Module): """ Relative Position Embedding As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 """ def __init__(self, block_size, win_size, dim_head, scale): """ Args: block_size (int): block size win_size (int): neighbourhood window size dim_head (int): attention head dim scale (float): scale factor (for init) """ super().__init__() self.block_size = block_size self.dim_head = dim_head self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) def forward(self, q): B, BB, HW, _ = q.shape # relative logits in width dimension. q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) # relative logits in height dimension. q = q.transpose(1, 2) rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) rel_logits = rel_logits_h + rel_logits_w rel_logits = rel_logits.reshape(B, BB, HW, -1) return rel_logits class HaloAttn(nn.Module): """ Halo Attention Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` - https://arxiv.org/abs/2103.12731 The internal dimensions of the attention module are controlled by the interaction of several arguments. 
* the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query and key (qk) dimensions are determined by * num_heads * dim_head if dim_head is not None * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda) stride: output stride of the module, query downscaled if > 1 (default: 1). num_heads: parallel attention heads (default: 8). dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set block_size (int): size of blocks. (default: 8) halo_size (int): size of halo overlap. (default: 3) qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) qkv_bias (bool) : add bias to q, k, and v projections avg_down (bool): use average pool downsample instead of strided query blocks scale_pos_embed (bool): scale the position embedding as well as Q @ K """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False): super().__init__() dim_out = dim_out or dim assert dim_out % num_heads == 0 assert stride in (1, 2) self.num_heads = num_heads self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.dim_head_v = dim_out // self.num_heads self.dim_out_qk = num_heads * self.dim_head_qk self.dim_out_v = num_heads * self.dim_head_v self.scale = self.dim_head_qk ** -0.5 self.scale_pos_embed = scale_pos_embed self.block_size = self.block_size_ds = block_size self.halo_size = halo_size self.win_size = block_size + halo_size * 2 # neighbourhood window size self.block_stride = 1 use_avg_pool = False if stride > 1: use_avg_pool = avg_down or block_size % stride != 0 self.block_stride = 1 if use_avg_pool else stride self.block_size_ds = self.block_size // self.block_stride # FIXME not clear if this stride behaviour is what the paper intended # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving # data in unfolded block form. I haven't wrapped my head around how that'd look. 
self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) self.pos_embed = PosEmbedRel( block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() self.reset_parameters() def reset_parameters(self): std = self.q.weight.shape[1] ** -0.5 # fan-in trunc_normal_(self.q.weight, std=std) trunc_normal_(self.kv.weight, std=std) trunc_normal_(self.pos_embed.height_rel, std=self.scale) trunc_normal_(self.pos_embed.width_rel, std=self.scale) def forward(self, x): B, C, H, W = x.shape _assert(H % self.block_size == 0, '') _assert(W % self.block_size == 0, '') num_h_blocks = H // self.block_size num_w_blocks = W // self.block_size num_blocks = num_h_blocks * num_w_blocks q = self.q(x) # unfold q = q.reshape( -1, self.dim_head_qk, num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) # B, num_heads * dim_head * block_size ** 2, num_blocks q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) # B * num_heads, num_blocks, block_size ** 2, dim_head kv = self.kv(x) # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach. # FIXME figure out how to switch impl between this and conv2d if XLA being used. kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v if self.scale_pos_embed: attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale else: attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q) # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 attn = attn.softmax(dim=-1) out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks # fold out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) out = out.permute(0, 3, 1, 4, 2).contiguous().view( B, self.dim_out_v, H // self.block_stride, W // self.block_stride) # B, dim_out, H // block_stride, W // block_stride out = self.pool(out) return out """ Three alternatives for overlapping windows. `.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() if is_xla: # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment. 
WW = self.win_size ** 2 pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size) kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size) elif self.stride_tricks: kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() kv = kv.as_strided(( B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) else: kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) kv = kv.reshape( B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) """
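# Illustrative usage of HaloAttn above (not part of the original file); spatial dims must be
# divisible by block_size. Import path assumed to be `timm.layers.halo_attn` per the file path above.
import torch
from timm.layers.halo_attn import HaloAttn

x = torch.randn(2, 128, 32, 32)                     # 32 is divisible by block_size=8
attn = HaloAttn(dim=128, dim_out=128, num_heads=8, block_size=8, halo_size=3)
print(attn(x).shape)                                # torch.Size([2, 128, 32, 32])

attn_s2 = HaloAttn(dim=128, dim_out=256, stride=2, num_heads=8, block_size=8, halo_size=3)
print(attn_s2(x).shape)                             # torch.Size([2, 256, 16, 16])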
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/non_local_attn.py
""" Bilinear-Attention-Transform and Non-Local Attention Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms` - https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification """ import torch from torch import nn from torch.nn import functional as F from .conv_bn_act import ConvNormAct from .helpers import make_divisible from .trace_utils import _assert class NonLocalAttn(nn.Module): """Spatial NL block for image classification. This was adapted from https://github.com/BA-Transform/BAT-Image-Classification Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net. """ def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs): super(NonLocalAttn, self).__init__() if rd_channels is None: rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) self.scale = in_channels ** -0.5 if use_scale else 1.0 self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True) self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True) self.norm = nn.BatchNorm2d(in_channels) self.reset_parameters() def forward(self, x): shortcut = x t = self.t(x) p = self.p(x) g = self.g(x) B, C, H, W = t.size() t = t.view(B, C, -1).permute(0, 2, 1) p = p.view(B, C, -1) g = g.view(B, C, -1).permute(0, 2, 1) att = torch.bmm(t, p) * self.scale att = F.softmax(att, dim=2) x = torch.bmm(att, g) x = x.permute(0, 2, 1).reshape(B, C, H, W) x = self.z(x) x = self.norm(x) + shortcut return x def reset_parameters(self): for name, m in self.named_modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_( m.weight, mode='fan_out', nonlinearity='relu') if len(list(m.parameters())) > 1: nn.init.constant_(m.bias, 0.0) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 0) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.GroupNorm): nn.init.constant_(m.weight, 0) nn.init.constant_(m.bias, 0) class BilinearAttnTransform(nn.Module): def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d): super(BilinearAttnTransform, self).__init__() self.conv1 = ConvNormAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer) self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1)) self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size)) self.conv2 = ConvNormAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) self.block_size = block_size self.groups = groups self.in_channels = in_channels def resize_mat(self, x, t: int): B, C, block_size, block_size1 = x.shape _assert(block_size == block_size1, '') if t <= 1: return x x = x.view(B * C, -1, 1, 1) x = x * torch.eye(t, t, dtype=x.dtype, device=x.device) x = x.view(B * C, block_size, block_size, t, t) x = torch.cat(torch.split(x, 1, dim=1), dim=3) x = torch.cat(torch.split(x, 1, dim=2), dim=4) x = x.view(B, C, block_size * t, block_size * t) return x def forward(self, x): _assert(x.shape[-1] % self.block_size == 0, '') _assert(x.shape[-2] % self.block_size == 0, '') B, C, H, W = x.shape out = self.conv1(x) rp = F.adaptive_max_pool2d(out, (self.block_size, 
1)) cp = F.adaptive_max_pool2d(out, (1, self.block_size)) p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid() q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid() p = p / p.sum(dim=3, keepdim=True) q = q / q.sum(dim=2, keepdim=True) p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() p = p.view(B, C, self.block_size, self.block_size) q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size( 0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous() q = q.view(B, C, self.block_size, self.block_size) p = self.resize_mat(p, H // self.block_size) q = self.resize_mat(q, W // self.block_size) y = p.matmul(x) y = y.matmul(q) y = self.conv2(y) return y class BatNonLocalAttn(nn.Module): """ BAT Adapted from: https://github.com/BA-Transform/BAT-Image-Classification """ def __init__( self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_): super().__init__() if rd_channels is None: rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor) self.conv1 = ConvNormAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer) self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer) self.conv2 = ConvNormAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer) self.dropout = nn.Dropout2d(p=drop_rate) def forward(self, x): xl = self.conv1(x) y = self.ba(xl) y = self.conv2(y) y = self.dropout(y) return y + x
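# Illustrative usage of the blocks above (not part of the original file); for the BAT block,
# H and W must be divisible by block_size. Import path assumed per the file path above.
import torch
from timm.layers.non_local_attn import BatNonLocalAttn, NonLocalAttn

x = torch.randn(2, 64, 28, 28)
print(NonLocalAttn(64)(x).shape)                    # torch.Size([2, 64, 28, 28])
print(BatNonLocalAttn(64, block_size=7)(x).shape)   # torch.Size([2, 64, 28, 28])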
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/attention_pool2d.py
""" Attention Pool 2D Implementations of 2D spatial feature pooling using multi-head attention instead of average pool. Based on idea in CLIP by OpenAI, licensed Apache 2.0 https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py Hacked together by / Copyright 2021 Ross Wightman """ from typing import Union, Tuple import torch import torch.nn as nn from .helpers import to_2tuple from .pos_embed_sincos import apply_rot_embed, RotaryEmbedding from .weight_init import trunc_normal_ class RotAttentionPool2d(nn.Module): """ Attention based 2D feature pooling w/ rotary (relative) pos embedding. This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. Adapted from the AttentionPool2d in CLIP w/ rotary embedding instead of learned embed. https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py NOTE: While this impl does not require a fixed feature size, performance at differeing resolutions from train varies widely and falls off dramatically. I'm not sure if there is a way around this... -RW """ def __init__( self, in_features: int, out_features: int = None, embed_dim: int = None, num_heads: int = 4, qkv_bias: bool = True, ): super().__init__() embed_dim = embed_dim or in_features out_features = out_features or in_features self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.proj = nn.Linear(embed_dim, out_features) self.num_heads = num_heads assert embed_dim % num_heads == 0 self.head_dim = embed_dim // num_heads self.scale = self.head_dim ** -0.5 self.pos_embed = RotaryEmbedding(self.head_dim) trunc_normal_(self.qkv.weight, std=in_features ** -0.5) nn.init.zeros_(self.qkv.bias) def forward(self, x): B, _, H, W = x.shape N = H * W x = x.reshape(B, -1, N).permute(0, 2, 1) x = torch.cat([x.mean(1, keepdim=True), x], dim=1) x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = x[0], x[1], x[2] qc, q = q[:, :, :1], q[:, :, 1:] sin_emb, cos_emb = self.pos_embed.get_embed((H, W)) q = apply_rot_embed(q, sin_emb, cos_emb) q = torch.cat([qc, q], dim=2) kc, k = k[:, :, :1], k[:, :, 1:] k = apply_rot_embed(k, sin_emb, cos_emb) k = torch.cat([kc, k], dim=2) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) x = self.proj(x) return x[:, 0] class AttentionPool2d(nn.Module): """ Attention based 2D feature pooling w/ learned (absolute) pos embedding. This is a multi-head attention based replacement for (spatial) average pooling in NN architectures. It was based on impl in CLIP by OpenAI https://github.com/openai/CLIP/blob/3b473b0e682c091a9e53623eebc1ca1657385717/clip/model.py NOTE: This requires feature size upon construction and well prevent adaptive sizing of the network. 
""" def __init__( self, in_features: int, feat_size: Union[int, Tuple[int, int]], out_features: int = None, embed_dim: int = None, num_heads: int = 4, qkv_bias: bool = True, ): super().__init__() embed_dim = embed_dim or in_features out_features = out_features or in_features assert embed_dim % num_heads == 0 self.feat_size = to_2tuple(feat_size) self.qkv = nn.Linear(in_features, embed_dim * 3, bias=qkv_bias) self.proj = nn.Linear(embed_dim, out_features) self.num_heads = num_heads self.head_dim = embed_dim // num_heads self.scale = self.head_dim ** -0.5 spatial_dim = self.feat_size[0] * self.feat_size[1] self.pos_embed = nn.Parameter(torch.zeros(spatial_dim + 1, in_features)) trunc_normal_(self.pos_embed, std=in_features ** -0.5) trunc_normal_(self.qkv.weight, std=in_features ** -0.5) nn.init.zeros_(self.qkv.bias) def forward(self, x): B, _, H, W = x.shape N = H * W assert self.feat_size[0] == H assert self.feat_size[1] == W x = x.reshape(B, -1, N).permute(0, 2, 1) x = torch.cat([x.mean(1, keepdim=True), x], dim=1) x = x + self.pos_embed.unsqueeze(0).to(x.dtype) x = self.qkv(x).reshape(B, N + 1, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) q, k, v = x[0], x[1], x[2] attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) x = (attn @ v).transpose(1, 2).reshape(B, N + 1, -1) x = self.proj(x) return x[:, 0]
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/pos_embed.py
""" Position Embedding Utilities Hacked together by / Copyright 2022 Ross Wightman """ import logging import math from typing import List, Tuple, Optional, Union import torch import torch.nn.functional as F from .helpers import to_2tuple _logger = logging.getLogger(__name__) def resample_abs_pos_embed( posemb, new_size: List[int], old_size: Optional[List[int]] = None, num_prefix_tokens: int = 1, interpolation: str = 'bicubic', antialias: bool = True, verbose: bool = False, ): # sort out sizes, assume square if old size not provided num_pos_tokens = posemb.shape[1] num_new_tokens = new_size[0] * new_size[1] + num_prefix_tokens if num_new_tokens == num_pos_tokens and new_size[0] == new_size[1]: return posemb if not old_size: hw = int(math.sqrt(num_pos_tokens - num_prefix_tokens)) old_size = hw, hw if num_prefix_tokens: posemb_prefix, posemb = posemb[:, :num_prefix_tokens], posemb[:, num_prefix_tokens:] else: posemb_prefix, posemb = None, posemb # do the interpolation embed_dim = posemb.shape[-1] posemb = posemb.reshape(1, old_size[0], old_size[1], -1).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias) posemb = posemb.permute(0, 2, 3, 1).reshape(1, -1, embed_dim) # add back extra (class, etc) prefix tokens if posemb_prefix is not None: posemb = torch.cat([posemb_prefix, posemb], dim=1) if not torch.jit.is_scripting() and verbose: _logger.info(f'Resized position embedding: {old_size} to {new_size}.') return posemb def resample_abs_pos_embed_nhwc( posemb, new_size: List[int], interpolation: str = 'bicubic', antialias: bool = True, verbose: bool = False, ): if new_size[0] == posemb.shape[-3] and new_size[1] == posemb.shape[-2]: return posemb # do the interpolation posemb = posemb.reshape(1, posemb.shape[-3], posemb.shape[-2], posemb.shape[-1]).permute(0, 3, 1, 2) posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias) posemb = posemb.permute(0, 2, 3, 1) if not torch.jit.is_scripting() and verbose: _logger.info(f'Resized position embedding: {posemb.shape[-3:-1]} to {new_size}.') return posemb
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/eca.py
""" ECA module from ECAnet paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks https://arxiv.org/abs/1910.03151 Original ECA model borrowed from https://github.com/BangguWu/ECANet Modified circular ECA implementation and adaption for use in timm package by Chris Ha https://github.com/VRandme Original License: MIT License Copyright (c) 2019 BangguWu, Qilong Wang Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import math from torch import nn import torch.nn.functional as F from .create_act import create_act_layer from .helpers import make_divisible class EcaModule(nn.Module): """Constructs an ECA module. Args: channels: Number of channels of the input feature map for use in adaptive kernel sizes for actual calculations according to channel. gamma, beta: when channel is given parameters of mapping function refer to original paper https://arxiv.org/pdf/1910.03151.pdf (default=None. if channel size not given, use k_size given for kernel size.) kernel_size: Adaptive selection of kernel size (default=3) gamm: used in kernel_size calc, see above beta: used in kernel_size calc, see above act_layer: optional non-linearity after conv, enables conv bias, this is an experiment gate_layer: gating non-linearity to use """ def __init__( self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid', rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False): super(EcaModule, self).__init__() if channels is not None: t = int(abs(math.log(channels, 2) + beta) / gamma) kernel_size = max(t if t % 2 else t + 1, 3) assert kernel_size % 2 == 1 padding = (kernel_size - 1) // 2 if use_mlp: # NOTE 'mlp' mode is a timm experiment, not in paper assert channels is not None if rd_channels is None: rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor) act_layer = act_layer or nn.ReLU self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True) self.act = create_act_layer(act_layer) self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True) else: self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) self.act = None self.conv2 = None self.gate = create_act_layer(gate_layer) def forward(self, x): y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv y = self.conv(y) if self.conv2 is not None: y = self.act(y) y = self.conv2(y) y = self.gate(y).view(x.shape[0], -1, 1, 1) return x * y.expand_as(x) EfficientChannelAttn = EcaModule # alias class CecaModule(nn.Module): """Constructs a circular ECA module. 
ECA module where the conv uses circular padding rather than zero padding. Unlike the spatial dimension, the channels do not have inherent ordering nor locality. Although this module in essence, applies such an assumption, it is unnecessary to limit the channels on either "edge" from being circularly adapted to each other. This will fundamentally increase connectivity and possibly increase performance metrics (accuracy, robustness), without significantly impacting resource metrics (parameter size, throughput,latency, etc) Args: channels: Number of channels of the input feature map for use in adaptive kernel sizes for actual calculations according to channel. gamma, beta: when channel is given parameters of mapping function refer to original paper https://arxiv.org/pdf/1910.03151.pdf (default=None. if channel size not given, use k_size given for kernel size.) kernel_size: Adaptive selection of kernel size (default=3) gamm: used in kernel_size calc, see above beta: used in kernel_size calc, see above act_layer: optional non-linearity after conv, enables conv bias, this is an experiment gate_layer: gating non-linearity to use """ def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'): super(CecaModule, self).__init__() if channels is not None: t = int(abs(math.log(channels, 2) + beta) / gamma) kernel_size = max(t if t % 2 else t + 1, 3) has_act = act_layer is not None assert kernel_size % 2 == 1 # PyTorch circular padding mode is buggy as of pytorch 1.4 # see https://github.com/pytorch/pytorch/pull/17240 # implement manual circular padding self.padding = (kernel_size - 1) // 2 self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act) self.gate = create_act_layer(gate_layer) def forward(self, x): y = x.mean((2, 3)).view(x.shape[0], 1, -1) # Manually implement circular padding, F.pad does not seemed to be bugged y = F.pad(y, (self.padding, self.padding), mode='circular') y = self.conv(y) y = self.gate(y).view(x.shape[0], -1, 1, 1) return x * y.expand_as(x) CircularEfficientChannelAttn = CecaModule
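# Illustrative usage of the ECA modules above (not part of the original file); import path
# assumed to be `timm.layers.eca` per the file path above.
import torch
from timm.layers.eca import CecaModule, EcaModule

x = torch.randn(2, 128, 14, 14)
print(EcaModule(channels=128)(x).shape)    # torch.Size([2, 128, 14, 14]), kernel size derived from channels
print(CecaModule(kernel_size=3)(x).shape)  # torch.Size([2, 128, 14, 14]), circular padding variant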
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/activations_me.py
""" Activations (memory-efficient w/ custom autograd) A collection of activations fn and modules with a common interface so that they can easily be swapped. All have an `inplace` arg even if not used. These activations are not compatible with jit scripting or ONNX export of the model, please use either the JIT or basic versions of the activations. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from torch.nn import functional as F @torch.jit.script def swish_jit_fwd(x): return x.mul(torch.sigmoid(x)) @torch.jit.script def swish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) class SwishJitAutoFn(torch.autograd.Function): """ torch.jit.script optimised Swish w/ memory-efficient checkpoint Inspired by conversation btw Jeremy Howard & Adam Pazske https://twitter.com/jeremyphoward/status/1188251041835315200 """ @staticmethod def symbolic(g, x): return g.op("Mul", x, g.op("Sigmoid", x)) @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return swish_jit_bwd(x, grad_output) def swish_me(x, inplace=False): return SwishJitAutoFn.apply(x) class SwishMe(nn.Module): def __init__(self, inplace: bool = False): super(SwishMe, self).__init__() def forward(self, x): return SwishJitAutoFn.apply(x) @torch.jit.script def mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x))) @torch.jit.script def mish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) x_tanh_sp = F.softplus(x).tanh() return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) class MishJitAutoFn(torch.autograd.Function): """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 A memory efficient, jit scripted variant of Mish """ @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return mish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return mish_jit_bwd(x, grad_output) def mish_me(x, inplace=False): return MishJitAutoFn.apply(x) class MishMe(nn.Module): def __init__(self, inplace: bool = False): super(MishMe, self).__init__() def forward(self, x): return MishJitAutoFn.apply(x) @torch.jit.script def hard_sigmoid_jit_fwd(x, inplace: bool = False): return (x + 3).clamp(min=0, max=6).div(6.) @torch.jit.script def hard_sigmoid_jit_bwd(x, grad_output): m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. return grad_output * m class HardSigmoidJitAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_sigmoid_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_sigmoid_jit_bwd(x, grad_output) def hard_sigmoid_me(x, inplace: bool = False): return HardSigmoidJitAutoFn.apply(x) class HardSigmoidMe(nn.Module): def __init__(self, inplace: bool = False): super(HardSigmoidMe, self).__init__() def forward(self, x): return HardSigmoidJitAutoFn.apply(x) @torch.jit.script def hard_swish_jit_fwd(x): return x * (x + 3).clamp(min=0, max=6).div(6.) @torch.jit.script def hard_swish_jit_bwd(x, grad_output): m = torch.ones_like(x) * (x >= 3.) m = torch.where((x >= -3.) & (x <= 3.), x / 3. 
+ .5, m) return grad_output * m class HardSwishJitAutoFn(torch.autograd.Function): """A memory efficient, jit-scripted HardSwish activation""" @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_swish_jit_bwd(x, grad_output) @staticmethod def symbolic(g, self): input = g.op("Add", self, g.op('Constant', value_t=torch.tensor(3, dtype=torch.float))) hardtanh_ = g.op("Clip", input, g.op('Constant', value_t=torch.tensor(0, dtype=torch.float)), g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) hardtanh_ = g.op("Div", hardtanh_, g.op('Constant', value_t=torch.tensor(6, dtype=torch.float))) return g.op("Mul", self, hardtanh_) def hard_swish_me(x, inplace=False): return HardSwishJitAutoFn.apply(x) class HardSwishMe(nn.Module): def __init__(self, inplace: bool = False): super(HardSwishMe, self).__init__() def forward(self, x): return HardSwishJitAutoFn.apply(x) @torch.jit.script def hard_mish_jit_fwd(x): return 0.5 * x * (x + 2).clamp(min=0, max=2) @torch.jit.script def hard_mish_jit_bwd(x, grad_output): m = torch.ones_like(x) * (x >= -2.) m = torch.where((x >= -2.) & (x <= 0.), x + 1., m) return grad_output * m class HardMishJitAutoFn(torch.autograd.Function): """ A memory efficient, jit scripted variant of Hard Mish Experimental, based on notes by Mish author Diganta Misra at https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md """ @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_mish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_mish_jit_bwd(x, grad_output) def hard_mish_me(x, inplace: bool = False): return HardMishJitAutoFn.apply(x) class HardMishMe(nn.Module): def __init__(self, inplace: bool = False): super(HardMishMe, self).__init__() def forward(self, x): return HardMishJitAutoFn.apply(x)
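# Illustrative usage of the memory-efficient activations above (not part of the original file);
# import path assumed per the file path above. Gradients flow through the custom autograd Functions.
import torch
from timm.layers.activations_me import HardSwishMe, MishMe, swish_me

x = torch.randn(4, 8, requires_grad=True)
y = swish_me(x).sum() + HardSwishMe()(x).sum() + MishMe()(x).sum()
y.backward()
print(x.grad.shape)               # torch.Size([4, 8])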
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/norm.py
""" Normalization layers and wrappers Norm layer definitions that support fast norm and consistent channel arg order (always first arg). Hacked together by / Copyright 2022 Ross Wightman """ import numbers from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from .fast_norm import is_fast_norm, fast_group_norm, fast_layer_norm, fast_rms_norm class GroupNorm(nn.GroupNorm): def __init__(self, num_channels, num_groups=32, eps=1e-5, affine=True): # NOTE num_channels is swapped to first arg for consistency in swapping norm layers with BN super().__init__(num_groups, num_channels, eps=eps, affine=affine) self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x): if self.fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class GroupNorm1(nn.GroupNorm): """ Group Normalization with 1 group. Input: tensor in shape [B, C, *] """ def __init__(self, num_channels, **kwargs): super().__init__(1, num_channels, **kwargs) self.fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.fast_norm: return fast_group_norm(x, self.num_groups, self.weight, self.bias, self.eps) else: return F.group_norm(x, self.num_groups, self.weight, self.bias, self.eps) class LayerNorm(nn.LayerNorm): """ LayerNorm w/ fast norm option """ def __init__(self, num_channels, eps=1e-6, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) return x class LayerNorm2d(nn.LayerNorm): """ LayerNorm for channels of '2D' spatial NCHW tensors """ def __init__(self, num_channels, eps=1e-6, affine=True): super().__init__(num_channels, eps=eps, elementwise_affine=affine) self._fast_norm = is_fast_norm() # can't script unless we have these flags here (no globals) def forward(self, x: torch.Tensor) -> torch.Tensor: x = x.permute(0, 2, 3, 1) if self._fast_norm: x = fast_layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) else: x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) x = x.permute(0, 3, 1, 2) return x def _is_contiguous(tensor: torch.Tensor) -> bool: # jit is oh so lovely :/ if torch.jit.is_scripting(): return tensor.is_contiguous() else: return tensor.is_contiguous(memory_format=torch.contiguous_format) @torch.jit.script def _layer_norm_cf(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): s, u = torch.var_mean(x, dim=1, unbiased=False, keepdim=True) x = (x - u) * torch.rsqrt(s + eps) x = x * weight[:, None, None] + bias[:, None, None] return x def _layer_norm_cf_sqm(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, eps: float): u = x.mean(dim=1, keepdim=True) s = ((x * x).mean(dim=1, keepdim=True) - (u * u)).clamp(0) x = (x - u) * torch.rsqrt(s + eps) x = x * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1) return x class LayerNormExp2d(nn.LayerNorm): """ LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W). Experimental implementation w/ manual norm for tensors non-contiguous tensors. 
This improves throughput in some scenarios (tested on Ampere GPU), esp w/ channels_last layout. However, benefits are not always clear and can perform worse on other GPUs. """ def __init__(self, num_channels, eps=1e-6): super().__init__(num_channels, eps=eps) def forward(self, x) -> torch.Tensor: if _is_contiguous(x): x = F.layer_norm( x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2) else: x = _layer_norm_cf(x, self.weight, self.bias, self.eps) return x class RmsNorm(nn.Module): """ RmsNorm w/ fast (apex) norm if available """ __constants__ = ['normalized_shape', 'eps', 'elementwise_affine'] normalized_shape: Tuple[int, ...] eps: float elementwise_affine: bool def __init__(self, channels, eps=1e-6, affine=True, device=None, dtype=None) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() normalized_shape = channels if isinstance(normalized_shape, numbers.Integral): # mypy error: incompatible types in assignment normalized_shape = (normalized_shape,) # type: ignore[assignment] self.normalized_shape = tuple(normalized_shape) # type: ignore[arg-type] self.eps = eps self.elementwise_affine = affine if self.elementwise_affine: self.weight = nn.Parameter(torch.empty(self.normalized_shape, **factory_kwargs)) else: self.register_parameter('weight', None) self.reset_parameters() def reset_parameters(self) -> None: if self.elementwise_affine: nn.init.ones_(self.weight) def forward(self, x: torch.Tensor) -> torch.Tensor: # NOTE fast norm fallback needs our rms norm impl, so both paths through here. # Since there is no built-in PyTorch impl, always use APEX RmsNorm if is installed. x = fast_rms_norm(x, self.normalized_shape, self.weight, self.eps) return x
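# Illustrative usage of the norm wrappers above (not part of the original file); import path
# assumed to be `timm.layers.norm` per the file path above.
import torch
from timm.layers.norm import GroupNorm, LayerNorm2d, RmsNorm

x = torch.randn(2, 64, 8, 8)
print(GroupNorm(64, num_groups=16)(x).shape)   # note: num_channels comes first, torch.Size([2, 64, 8, 8])
print(LayerNorm2d(64)(x).shape)                # LayerNorm over C for NCHW input, same shape

tokens = torch.randn(2, 16, 64)
print(RmsNorm(64)(tokens).shape)               # torch.Size([2, 16, 64])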
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/trace_utils.py
try: from torch import _assert except ImportError: def _assert(condition: bool, message: str): assert condition, message def _float_to_int(x: float) -> int: """ Symbolic tracing helper to substitute for inbuilt `int`. Hint: Inbuilt `int` can't accept an argument of type `Proxy` """ return int(x)
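# Tiny illustrative example of the tracing-safe helpers above (not part of the original file).
import torch
from timm.layers.trace_utils import _assert, _float_to_int

t = torch.randn(2, 8, 8, 8)
_assert(t.shape[-1] % 4 == 0, 'last dim must be divisible by 4')
print(_float_to_int(3.9))   # 3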
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/fast_norm.py
""" 'Fast' Normalization Functions For GroupNorm and LayerNorm these functions bypass typical AMP upcast to float32. Additionally, for LayerNorm, the APEX fused LN is used if available (which also does not upcast) Hacked together by / Copyright 2022 Ross Wightman """ from typing import List, Optional import torch from torch.nn import functional as F try: from apex.normalization.fused_layer_norm import fused_layer_norm_affine has_apex = True except ImportError: has_apex = False try: from apex.normalization.fused_layer_norm import fused_rms_norm_affine, fused_rms_norm has_apex_rmsnorm = True except ImportError: has_apex_rmsnorm = False # fast (ie lower precision LN) can be disabled with this flag if issues crop up _USE_FAST_NORM = False # defaulting to False for now def is_fast_norm(): return _USE_FAST_NORM def set_fast_norm(enable=True): global _USE_FAST_NORM _USE_FAST_NORM = enable def fast_group_norm( x: torch.Tensor, num_groups: int, weight: Optional[torch.Tensor] = None, bias: Optional[torch.Tensor] = None, eps: float = 1e-5 ) -> torch.Tensor: if torch.jit.is_scripting(): # currently cannot use is_autocast_enabled within torchscript return F.group_norm(x, num_groups, weight, bias, eps) if torch.is_autocast_enabled(): # normally native AMP casts GN inputs to float32 # here we use the low precision autocast dtype # FIXME what to do re CPU autocast? dt = torch.get_autocast_gpu_dtype() x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None with torch.cuda.amp.autocast(enabled=False): return F.group_norm(x, num_groups, weight, bias, eps) def fast_layer_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, bias: Optional[torch.Tensor] = None, eps: float = 1e-5 ) -> torch.Tensor: if torch.jit.is_scripting(): # currently cannot use is_autocast_enabled within torchscript return F.layer_norm(x, normalized_shape, weight, bias, eps) if has_apex: return fused_layer_norm_affine(x, weight, bias, normalized_shape, eps) if torch.is_autocast_enabled(): # normally native AMP casts LN inputs to float32 # apex LN does not, this is behaving like Apex dt = torch.get_autocast_gpu_dtype() # FIXME what to do re CPU autocast? x, weight, bias = x.to(dt), weight.to(dt), bias.to(dt) if bias is not None else None with torch.cuda.amp.autocast(enabled=False): return F.layer_norm(x, normalized_shape, weight, bias, eps) def rms_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, eps: float = 1e-5, ): norm_ndim = len(normalized_shape) if torch.jit.is_scripting(): # ndim = len(x.shape) # dims = list(range(ndim - norm_ndim, ndim)) # this doesn't work on pytorch <= 1.13.x # NOTE -ve dims cause torchscript to crash in some cases, out of options to work around assert norm_ndim == 1 v = torch.var(x, dim=-1).unsqueeze(-1) # ts crashes with -ve dim + keepdim=True else: dims = tuple(range(-1, -norm_ndim - 1, -1)) v = torch.var(x, dim=dims, keepdim=True) x = x * torch.rsqrt(v + eps) if weight is not None: x = x * weight return x def fast_rms_norm( x: torch.Tensor, normalized_shape: List[int], weight: Optional[torch.Tensor] = None, eps: float = 1e-5, ) -> torch.Tensor: if torch.jit.is_scripting(): # this must be by itself, cannot merge with has_apex_rmsnorm return rms_norm(x, normalized_shape, weight, eps) if has_apex_rmsnorm: if weight is None: return fused_rms_norm(x, normalized_shape, eps) else: return fused_rms_norm_affine(x, weight, normalized_shape, eps) # fallback return rms_norm(x, normalized_shape, weight, eps)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/classifier.py
""" Classifier head and layer factory Hacked together by / Copyright 2020 Ross Wightman """ from collections import OrderedDict from functools import partial from typing import Optional, Union, Callable import torch import torch.nn as nn from torch.nn import functional as F from .adaptive_avgmax_pool import SelectAdaptivePool2d from .create_act import get_act_layer from .create_norm import get_norm_layer def _create_pool( num_features: int, num_classes: int, pool_type: str = 'avg', use_conv: bool = False, input_fmt: Optional[str] = None, ): flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling if not pool_type: assert num_classes == 0 or use_conv,\ 'Pooling can only be disabled if classifier is also removed or conv classifier is used' flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) global_pool = SelectAdaptivePool2d( pool_type=pool_type, flatten=flatten_in_pool, input_fmt=input_fmt, ) num_pooled_features = num_features * global_pool.feat_mult() return global_pool, num_pooled_features def _create_fc(num_features, num_classes, use_conv=False): if num_classes <= 0: fc = nn.Identity() # pass-through (no classifier) elif use_conv: fc = nn.Conv2d(num_features, num_classes, 1, bias=True) else: fc = nn.Linear(num_features, num_classes, bias=True) return fc def create_classifier( num_features: int, num_classes: int, pool_type: str = 'avg', use_conv: bool = False, input_fmt: str = 'NCHW', drop_rate: Optional[float] = None, ): global_pool, num_pooled_features = _create_pool( num_features, num_classes, pool_type, use_conv=use_conv, input_fmt=input_fmt, ) fc = _create_fc( num_pooled_features, num_classes, use_conv=use_conv, ) if drop_rate is not None: dropout = nn.Dropout(drop_rate) return global_pool, dropout, fc return global_pool, fc class ClassifierHead(nn.Module): """Classifier head w/ configurable global pooling and dropout.""" def __init__( self, in_features: int, num_classes: int, pool_type: str = 'avg', drop_rate: float = 0., use_conv: bool = False, input_fmt: str = 'NCHW', ): """ Args: in_features: The number of input features. num_classes: The number of classes for the final classifier layer (output). pool_type: Global pooling type, pooling disabled if empty string (''). drop_rate: Pre-classifier dropout rate. 
""" super(ClassifierHead, self).__init__() self.in_features = in_features self.use_conv = use_conv self.input_fmt = input_fmt global_pool, fc = create_classifier( in_features, num_classes, pool_type, use_conv=use_conv, input_fmt=input_fmt, ) self.global_pool = global_pool self.drop = nn.Dropout(drop_rate) self.fc = fc self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() def reset(self, num_classes, pool_type=None): if pool_type is not None and pool_type != self.global_pool.pool_type: self.global_pool, self.fc = create_classifier( self.in_features, num_classes, pool_type=pool_type, use_conv=self.use_conv, input_fmt=self.input_fmt, ) self.flatten = nn.Flatten(1) if self.use_conv and pool_type else nn.Identity() else: num_pooled_features = self.in_features * self.global_pool.feat_mult() self.fc = _create_fc( num_pooled_features, num_classes, use_conv=self.use_conv, ) def forward(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.drop(x) if pre_logits: return self.flatten(x) x = self.fc(x) return self.flatten(x) class NormMlpClassifierHead(nn.Module): def __init__( self, in_features: int, num_classes: int, hidden_size: Optional[int] = None, pool_type: str = 'avg', drop_rate: float = 0., norm_layer: Union[str, Callable] = 'layernorm2d', act_layer: Union[str, Callable] = 'tanh', ): """ Args: in_features: The number of input features. num_classes: The number of classes for the final classifier layer (output). hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None. pool_type: Global pooling type, pooling disabled if empty string (''). drop_rate: Pre-classifier dropout rate. norm_layer: Normalization layer type. act_layer: MLP activation layer type (only used if hidden_size is not None). """ super().__init__() self.in_features = in_features self.hidden_size = hidden_size self.num_features = in_features self.use_conv = not pool_type norm_layer = get_norm_layer(norm_layer) act_layer = get_act_layer(act_layer) linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) self.norm = norm_layer(in_features) self.flatten = nn.Flatten(1) if pool_type else nn.Identity() if hidden_size: self.pre_logits = nn.Sequential(OrderedDict([ ('fc', linear_layer(in_features, hidden_size)), ('act', act_layer()), ])) self.num_features = hidden_size else: self.pre_logits = nn.Identity() self.drop = nn.Dropout(drop_rate) self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def reset(self, num_classes, global_pool=None): if global_pool is not None: self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() self.use_conv = self.global_pool.is_identity() linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear if self.hidden_size: if ((isinstance(self.pre_logits.fc, nn.Conv2d) and not self.use_conv) or (isinstance(self.pre_logits.fc, nn.Linear) and self.use_conv)): with torch.no_grad(): new_fc = linear_layer(self.in_features, self.hidden_size) new_fc.weight.copy_(self.pre_logits.fc.weight.reshape(new_fc.weight.shape)) new_fc.bias.copy_(self.pre_logits.fc.bias) self.pre_logits.fc = new_fc self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x, pre_logits: bool = False): x = self.global_pool(x) x = self.norm(x) x = self.flatten(x) x = self.pre_logits(x) x = self.drop(x) if pre_logits: return x x = 
self.fc(x) return x
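# Illustrative usage of the classifier head factory above (not part of the original file);
# import path assumed per the file path above.
import torch
from timm.layers.classifier import ClassifierHead, create_classifier

x = torch.randn(2, 512, 7, 7)
head = ClassifierHead(512, num_classes=10, pool_type='avg', drop_rate=0.1)
print(head(x).shape)                       # torch.Size([2, 10])
print(head(x, pre_logits=True).shape)      # torch.Size([2, 512])

global_pool, fc = create_classifier(512, 10, pool_type='avg')
print(fc(global_pool(x)).shape)            # torch.Size([2, 10])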
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/pool2d_same.py
""" AvgPool2d w/ Same Padding Hacked together by / Copyright 2020 Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F from typing import List, Tuple, Optional from .helpers import to_2tuple from .padding import pad_same, get_padding_value def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), ceil_mode: bool = False, count_include_pad: bool = True): # FIXME how to deal with count_include_pad vs not for external padding? x = pad_same(x, kernel_size, stride) return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad) class AvgPool2dSame(nn.AvgPool2d): """ Tensorflow like 'SAME' wrapper for 2D average pooling """ def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True): kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad) def forward(self, x): x = pad_same(x, self.kernel_size, self.stride) return F.avg_pool2d( x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad) def max_pool2d_same( x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), dilation: List[int] = (1, 1), ceil_mode: bool = False): x = pad_same(x, kernel_size, stride, value=-float('inf')) return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode) class MaxPool2dSame(nn.MaxPool2d): """ Tensorflow like 'SAME' wrapper for 2D max pooling """ def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False): kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) dilation = to_2tuple(dilation) super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode) def forward(self, x): x = pad_same(x, self.kernel_size, self.stride, value=-float('inf')) return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode) def create_pool2d(pool_type, kernel_size, stride=None, **kwargs): stride = stride or kernel_size padding = kwargs.pop('padding', '') padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs) if is_dynamic: if pool_type == 'avg': return AvgPool2dSame(kernel_size, stride=stride, **kwargs) elif pool_type == 'max': return MaxPool2dSame(kernel_size, stride=stride, **kwargs) else: assert False, f'Unsupported pool type {pool_type}' else: if pool_type == 'avg': return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) elif pool_type == 'max': return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) else: assert False, f'Unsupported pool type {pool_type}'
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/std_conv.py
""" Convolution with Weight Standardization (StdConv and ScaledStdConv) StdConv: @article{weightstandardization, author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille}, title = {Weight Standardization}, journal = {arXiv preprint arXiv:1903.10520}, year = {2019}, } Code: https://github.com/joe-siyuan-qiao/WeightStandardization ScaledStdConv: Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets Hacked together by / copyright Ross Wightman, 2021. """ import torch import torch.nn as nn import torch.nn.functional as F from .padding import get_padding, get_padding_value, pad_same class StdConv2d(nn.Conv2d): """Conv2d with Weight Standardization. Used for BiT ResNet-V2 models. Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - https://arxiv.org/abs/1903.10520v2 """ def __init__( self, in_channel, out_channels, kernel_size, stride=1, padding=None, dilation=1, groups=1, bias=False, eps=1e-6): if padding is None: padding = get_padding(kernel_size, stride, dilation) super().__init__( in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.eps = eps def forward(self, x): weight = F.batch_norm( self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0., eps=self.eps).reshape_as(self.weight) x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return x class StdConv2dSame(nn.Conv2d): """Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model. Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` - https://arxiv.org/abs/1903.10520v2 """ def __init__( self, in_channel, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, groups=1, bias=False, eps=1e-6): padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) super().__init__( in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.same_pad = is_dynamic self.eps = eps def forward(self, x): if self.same_pad: x = pad_same(x, self.kernel_size, self.stride, self.dilation) weight = F.batch_norm( self.weight.reshape(1, self.out_channels, -1), None, None, training=True, momentum=0., eps=self.eps).reshape_as(self.weight) x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return x class ScaledStdConv2d(nn.Conv2d): """Conv2d layer with Scaled Weight Standardization. Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. 
""" def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=None, dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): if padding is None: padding = get_padding(kernel_size, stride, dilation) super().__init__( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in) self.eps = eps def forward(self, x): weight = F.batch_norm( self.weight.reshape(1, self.out_channels, -1), None, None, weight=(self.gain * self.scale).view(-1), training=True, momentum=0., eps=self.eps).reshape_as(self.weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups) class ScaledStdConv2dSame(nn.Conv2d): """Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` - https://arxiv.org/abs/2101.08692 NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor. """ def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding='SAME', dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0): padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation) super().__init__( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init)) self.scale = gamma * self.weight[0].numel() ** -0.5 self.same_pad = is_dynamic self.eps = eps def forward(self, x): if self.same_pad: x = pad_same(x, self.kernel_size, self.stride, self.dilation) weight = F.batch_norm( self.weight.reshape(1, self.out_channels, -1), None, None, weight=(self.gain * self.scale).view(-1), training=True, momentum=0., eps=self.eps).reshape_as(self.weight) return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/activations.py
""" Activations A collection of activations fn and modules with a common interface so that they can easily be swapped. All have an `inplace` arg even if not used. Hacked together by / Copyright 2020 Ross Wightman """ import torch from torch import nn as nn from torch.nn import functional as F def swish(x, inplace: bool = False): """Swish - Described in: https://arxiv.org/abs/1710.05941 """ return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) class Swish(nn.Module): def __init__(self, inplace: bool = False): super(Swish, self).__init__() self.inplace = inplace def forward(self, x): return swish(x, self.inplace) def mish(x, inplace: bool = False): """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 NOTE: I don't have a working inplace variant """ return x.mul(F.softplus(x).tanh()) class Mish(nn.Module): """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 """ def __init__(self, inplace: bool = False): super(Mish, self).__init__() def forward(self, x): return mish(x) def sigmoid(x, inplace: bool = False): return x.sigmoid_() if inplace else x.sigmoid() # PyTorch has this, but not with a consistent inplace argmument interface class Sigmoid(nn.Module): def __init__(self, inplace: bool = False): super(Sigmoid, self).__init__() self.inplace = inplace def forward(self, x): return x.sigmoid_() if self.inplace else x.sigmoid() def tanh(x, inplace: bool = False): return x.tanh_() if inplace else x.tanh() # PyTorch has this, but not with a consistent inplace argmument interface class Tanh(nn.Module): def __init__(self, inplace: bool = False): super(Tanh, self).__init__() self.inplace = inplace def forward(self, x): return x.tanh_() if self.inplace else x.tanh() def hard_swish(x, inplace: bool = False): inner = F.relu6(x + 3.).div_(6.) return x.mul_(inner) if inplace else x.mul(inner) class HardSwish(nn.Module): def __init__(self, inplace: bool = False): super(HardSwish, self).__init__() self.inplace = inplace def forward(self, x): return hard_swish(x, self.inplace) def hard_sigmoid(x, inplace: bool = False): if inplace: return x.add_(3.).clamp_(0., 6.).div_(6.) else: return F.relu6(x + 3.) / 6. 
class HardSigmoid(nn.Module): def __init__(self, inplace: bool = False): super(HardSigmoid, self).__init__() self.inplace = inplace def forward(self, x): return hard_sigmoid(x, self.inplace) def hard_mish(x, inplace: bool = False): """ Hard Mish Experimental, based on notes by Mish author Diganta Misra at https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md """ if inplace: return x.mul_(0.5 * (x + 2).clamp(min=0, max=2)) else: return 0.5 * x * (x + 2).clamp(min=0, max=2) class HardMish(nn.Module): def __init__(self, inplace: bool = False): super(HardMish, self).__init__() self.inplace = inplace def forward(self, x): return hard_mish(x, self.inplace) class PReLU(nn.PReLU): """Applies PReLU (w/ dummy inplace arg) """ def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None: super(PReLU, self).__init__(num_parameters=num_parameters, init=init) def forward(self, input: torch.Tensor) -> torch.Tensor: return F.prelu(input, self.weight) def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: return F.gelu(x) class GELU(nn.Module): """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) """ def __init__(self, inplace: bool = False): super(GELU, self).__init__() def forward(self, input: torch.Tensor) -> torch.Tensor: return F.gelu(input) def gelu_tanh(x: torch.Tensor, inplace: bool = False) -> torch.Tensor: return F.gelu(x, approximate='tanh') class GELUTanh(nn.Module): """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg) """ def __init__(self, inplace: bool = False): super(GELUTanh, self).__init__() def forward(self, input: torch.Tensor) -> torch.Tensor: return F.gelu(input, approximate='tanh')
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/conv_bn_act.py
""" Conv2d + BN + Act Hacked together by / Copyright 2020 Ross Wightman """ import functools from torch import nn as nn from .create_conv2d import create_conv2d from .create_norm_act import get_norm_act_layer class ConvNormAct(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=nn.ReLU, act_kwargs=None, drop_layer=None, ): super(ConvNormAct, self).__init__() norm_kwargs = norm_kwargs or {} act_kwargs = act_kwargs or {} self.conv = create_conv2d( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) # NOTE for backwards compatibility with models that use separate norm and act layer definitions norm_act_layer = get_norm_act_layer(norm_layer, act_layer) # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` if drop_layer: norm_kwargs['drop_layer'] = drop_layer self.bn = norm_act_layer( out_channels, apply_act=apply_act, act_kwargs=act_kwargs, **norm_kwargs, ) @property def in_channels(self): return self.conv.in_channels @property def out_channels(self): return self.conv.out_channels def forward(self, x): x = self.conv(x) x = self.bn(x) return x ConvBnAct = ConvNormAct def create_aa(aa_layer, channels, stride=2, enable=True): if not aa_layer or not enable: return nn.Identity() if isinstance(aa_layer, functools.partial): if issubclass(aa_layer.func, nn.AvgPool2d): return aa_layer() else: return aa_layer(channels) elif issubclass(aa_layer, nn.AvgPool2d): return aa_layer(stride) else: return aa_layer(channels=channels, stride=stride) class ConvNormActAa(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, bias=False, apply_act=True, norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=nn.ReLU, act_kwargs=None, aa_layer=None, drop_layer=None, ): super(ConvNormActAa, self).__init__() use_aa = aa_layer is not None and stride == 2 norm_kwargs = norm_kwargs or {} act_kwargs = act_kwargs or {} self.conv = create_conv2d( in_channels, out_channels, kernel_size, stride=1 if use_aa else stride, padding=padding, dilation=dilation, groups=groups, bias=bias) # NOTE for backwards compatibility with models that use separate norm and act layer definitions norm_act_layer = get_norm_act_layer(norm_layer, act_layer) # NOTE for backwards (weight) compatibility, norm layer name remains `.bn` if drop_layer: norm_kwargs['drop_layer'] = drop_layer self.bn = norm_act_layer(out_channels, apply_act=apply_act, act_kwargs=act_kwargs, **norm_kwargs) self.aa = create_aa(aa_layer, out_channels, stride=stride, enable=use_aa) @property def in_channels(self): return self.conv.in_channels @property def out_channels(self): return self.conv.out_channels def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.aa(x) return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/padding.py
""" Padding Helpers Hacked together by / Copyright 2020 Ross Wightman """ import math from typing import List, Tuple import torch import torch.nn.functional as F # Calculate symmetric padding for a convolution def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding # Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int): if isinstance(x, torch.Tensor): return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0) else: return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0) # Can SAME padding for given args be done statically? def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 def pad_same_arg( input_size: List[int], kernel_size: List[int], stride: List[int], dilation: List[int] = (1, 1), ) -> List[int]: ih, iw = input_size kh, kw = kernel_size pad_h = get_same_padding(ih, kh, stride[0], dilation[0]) pad_w = get_same_padding(iw, kw, stride[1], dilation[1]) return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2] # Dynamically pad input x with 'SAME' padding for conv with specified args def pad_same( x, kernel_size: List[int], stride: List[int], dilation: List[int] = (1, 1), value: float = 0, ): ih, iw = x.size()[-2:] pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0]) pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1]) x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2), value=value) return x def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: dynamic = False if isinstance(padding, str): # for any string padding, the padding will be calculated for you, one of three ways padding = padding.lower() if padding == 'same': # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact if is_static_pad(kernel_size, **kwargs): # static case, no extra overhead padding = get_padding(kernel_size, **kwargs) else: # dynamic 'SAME' padding, has runtime/GPU memory overhead padding = 0 dynamic = True elif padding == 'valid': # 'VALID' padding, same as padding=0 padding = 0 else: # Default to PyTorch style 'same'-ish symmetric padding padding = get_padding(kernel_size, **kwargs) return padding, dynamic
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/median_pool.py
""" Median Pool Hacked together by / Copyright 2020 Ross Wightman """ import torch.nn as nn import torch.nn.functional as F from .helpers import to_2tuple, to_4tuple class MedianPool2d(nn.Module): """ Median pool (usable as median filter when stride=1) module. Args: kernel_size: size of pooling kernel, int or 2-tuple stride: pool stride, int or 2-tuple padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad same: override padding and enforce same padding, boolean """ def __init__(self, kernel_size=3, stride=1, padding=0, same=False): super(MedianPool2d, self).__init__() self.k = to_2tuple(kernel_size) self.stride = to_2tuple(stride) self.padding = to_4tuple(padding) # convert to l, r, t, b self.same = same def _padding(self, x): if self.same: ih, iw = x.size()[2:] if ih % self.stride[0] == 0: ph = max(self.k[0] - self.stride[0], 0) else: ph = max(self.k[0] - (ih % self.stride[0]), 0) if iw % self.stride[1] == 0: pw = max(self.k[1] - self.stride[1], 0) else: pw = max(self.k[1] - (iw % self.stride[1]), 0) pl = pw // 2 pr = pw - pl pt = ph // 2 pb = ph - pt padding = (pl, pr, pt, pb) else: padding = self.padding return padding def forward(self, x): x = F.pad(x, self._padding(x), mode='reflect') x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0] return x
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/gather_excite.py
""" Gather-Excite Attention Block Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348 Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another impl that covers all of the cases. NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation Hacked together by / Copyright 2021 Ross Wightman """ import math from torch import nn as nn import torch.nn.functional as F from .create_act import create_act_layer, get_act_layer from .create_conv2d import create_conv2d from .helpers import make_divisible from .mlp import ConvMlp class GatherExcite(nn.Module): """ Gather-Excite Attention Module """ def __init__( self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True, rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'): super(GatherExcite, self).__init__() self.add_maxpool = add_maxpool act_layer = get_act_layer(act_layer) self.extent = extent if extra_params: self.gather = nn.Sequential() if extent == 0: assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params' self.gather.add_module( 'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True)) if norm_layer: self.gather.add_module(f'norm1', nn.BatchNorm2d(channels)) else: assert extent % 2 == 0 num_conv = int(math.log2(extent)) for i in range(num_conv): self.gather.add_module( f'conv{i + 1}', create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True)) if norm_layer: self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels)) if i != num_conv - 1: self.gather.add_module(f'act{i + 1}', act_layer(inplace=True)) else: self.gather = None if self.extent == 0: self.gk = 0 self.gs = 0 else: assert extent % 2 == 0 self.gk = self.extent * 2 - 1 self.gs = self.extent if not rd_channels: rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity() self.gate = create_act_layer(gate_layer) def forward(self, x): size = x.shape[-2:] if self.gather is not None: x_ge = self.gather(x) else: if self.extent == 0: # global extent x_ge = x.mean(dim=(2, 3), keepdims=True) if self.add_maxpool: # experimental codepath, may remove or change x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True) else: x_ge = F.avg_pool2d( x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False) if self.add_maxpool: # experimental codepath, may remove or change x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2) x_ge = self.mlp(x_ge) if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1: x_ge = F.interpolate(x_ge, size=size) return x * self.gate(x_ge)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/create_norm.py
""" Norm Layer Factory Create norm modules by string (to mirror create_act and creat_norm-act fns) Copyright 2022 Ross Wightman """ import types import functools import torch.nn as nn from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d _NORM_MAP = dict( batchnorm=nn.BatchNorm2d, batchnorm2d=nn.BatchNorm2d, batchnorm1d=nn.BatchNorm1d, groupnorm=GroupNorm, groupnorm1=GroupNorm1, layernorm=LayerNorm, layernorm2d=LayerNorm2d, ) _NORM_TYPES = {m for n, m in _NORM_MAP.items()} def create_norm_layer(layer_name, num_features, **kwargs): layer = get_norm_layer(layer_name) layer_instance = layer(num_features, **kwargs) return layer_instance def get_norm_layer(norm_layer): assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) norm_kwargs = {} # unbind partial fn, so args can be rebound later if isinstance(norm_layer, functools.partial): norm_kwargs.update(norm_layer.keywords) norm_layer = norm_layer.func if isinstance(norm_layer, str): layer_name = norm_layer.replace('_', '') norm_layer = _NORM_MAP.get(layer_name, None) elif norm_layer in _NORM_TYPES: norm_layer = norm_layer elif isinstance(norm_layer, types.FunctionType): # if function type, assume it is a lambda/fn that creates a norm layer norm_layer = norm_layer else: type_name = norm_layer.__name__.lower().replace('_', '') norm_layer = _NORM_MAP.get(type_name, None) assert norm_layer is not None, f"No equivalent norm layer for {type_name}" if norm_kwargs: norm_layer = functools.partial(norm_layer, **norm_kwargs) # bind/rebind args return norm_layer
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/layers/ml_decoder.py
from typing import Optional import torch from torch import nn from torch import nn, Tensor from torch.nn.modules.transformer import _get_activation_fn def add_ml_decoder_head(model): if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # most CNN models, like Resnet50 model.global_pool = nn.Identity() del model.fc num_classes = model.num_classes num_features = model.num_features model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'): # EfficientNet model.global_pool = nn.Identity() del model.classifier num_classes = model.num_classes num_features = model.num_features model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name(): # hasattr(model, 'head') del model.head num_classes = model.num_classes num_features = model.num_features model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features) else: print("Model code-writing is not aligned currently with ml-decoder") exit(-1) if hasattr(model, 'drop_rate'): # Ml-Decoder has inner dropout model.drop_rate = 0 return model class TransformerDecoderLayerOptimal(nn.Module): def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation="relu", layer_norm_eps=1e-5) -> None: super(TransformerDecoderLayerOptimal, self).__init__() self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout = nn.Dropout(dropout) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) # Implementation of Feedforward model self.linear1 = nn.Linear(d_model, dim_feedforward) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if 'activation' not in state: state['activation'] = torch.nn.functional.relu super(TransformerDecoderLayerOptimal, self).__setstate__(state) def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor: tgt = tgt + self.dropout1(tgt) tgt = self.norm1(tgt) tgt2 = self.multihead_attn(tgt, memory, memory)[0] tgt = tgt + self.dropout2(tgt2) tgt = self.norm2(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) tgt = tgt + self.dropout3(tgt2) tgt = self.norm3(tgt) return tgt # @torch.jit.script # class ExtrapClasses(object): # def __init__(self, num_queries: int, group_size: int): # self.num_queries = num_queries # self.group_size = group_size # # def __call__(self, h: torch.Tensor, class_embed_w: torch.Tensor, class_embed_b: torch.Tensor, out_extrap: # torch.Tensor): # # h = h.unsqueeze(-1).expand(-1, -1, -1, self.group_size) # h = h[..., None].repeat(1, 1, 1, self.group_size) # torch.Size([bs, 5, 768, groups]) # w = class_embed_w.view((self.num_queries, h.shape[2], self.group_size)) # out = (h * w).sum(dim=2) + class_embed_b # out = out.view((h.shape[0], self.group_size * self.num_queries)) # return out @torch.jit.script class GroupFC(object): def __init__(self, embed_len_decoder: int): self.embed_len_decoder = embed_len_decoder def __call__(self, h: torch.Tensor, duplicate_pooling: torch.Tensor, out_extrap: 
torch.Tensor): for i in range(self.embed_len_decoder): h_i = h[:, i, :] w_i = duplicate_pooling[i, :, :] out_extrap[:, i, :] = torch.matmul(h_i, w_i) class MLDecoder(nn.Module): def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048): super(MLDecoder, self).__init__() embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups if embed_len_decoder > num_classes: embed_len_decoder = num_classes # switching to 768 initial embeddings decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding self.embed_standart = nn.Linear(initial_num_features, decoder_embedding) # decoder decoder_dropout = 0.1 num_layers_decoder = 1 dim_feedforward = 2048 layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding, dim_feedforward=dim_feedforward, dropout=decoder_dropout) self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder) # non-learnable queries self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding) self.query_embed.requires_grad_(False) # group fully-connected self.num_classes = num_classes self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999) self.duplicate_pooling = torch.nn.Parameter( torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor)) self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes)) torch.nn.init.xavier_normal_(self.duplicate_pooling) torch.nn.init.constant_(self.duplicate_pooling_bias, 0) self.group_fc = GroupFC(embed_len_decoder) def forward(self, x): if len(x.shape) == 4: # [bs,2048, 7,7] embedding_spatial = x.flatten(2).transpose(1, 2) else: # [bs, 197,468] embedding_spatial = x embedding_spatial_786 = self.embed_standart(embedding_spatial) embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True) bs = embedding_spatial_786.shape[0] query_embed = self.query_embed.weight # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1) tgt = query_embed.unsqueeze(1).expand(-1, bs, -1) # no allocation of memory with expand h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768] h = h.transpose(0, 1) out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype) self.group_fc(h, self.duplicate_pooling, out_extrap) h_out = out_extrap.flatten(1)[:, :self.num_classes] h_out += self.duplicate_pooling_bias logits = h_out return logits
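# --- Added usage sketch (not part of the module above) ---
# A hypothetical end-to-end example using timm.create_model with a ResNet-50 backbone;
# the class count is arbitrary. add_ml_decoder_head replaces global pooling with Identity
# so MLDecoder receives the spatial feature map directly.
import torch
import timm
from timm.layers.ml_decoder import add_ml_decoder_head

model = timm.create_model('resnet50', num_classes=80)  # e.g. 80 multi-label classes
model = add_ml_decoder_head(model)                     # global_pool -> Identity, fc -> MLDecoder
logits = model(torch.randn(2, 3, 224, 224))
print(logits.shape)  # torch.Size([2, 80])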
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/asymmetric_loss.py
import torch import torch.nn as nn class AsymmetricLossMultiLabel(nn.Module): def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False): super(AsymmetricLossMultiLabel, self).__init__() self.gamma_neg = gamma_neg self.gamma_pos = gamma_pos self.clip = clip self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss self.eps = eps def forward(self, x, y): """" Parameters ---------- x: input logits y: targets (multi-label binarized vector) """ # Calculating Probabilities x_sigmoid = torch.sigmoid(x) xs_pos = x_sigmoid xs_neg = 1 - x_sigmoid # Asymmetric Clipping if self.clip is not None and self.clip > 0: xs_neg = (xs_neg + self.clip).clamp(max=1) # Basic CE calculation los_pos = y * torch.log(xs_pos.clamp(min=self.eps)) los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps)) loss = los_pos + los_neg # Asymmetric Focusing if self.gamma_neg > 0 or self.gamma_pos > 0: if self.disable_torch_grad_focal_loss: torch._C.set_grad_enabled(False) pt0 = xs_pos * y pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p pt = pt0 + pt1 one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y) one_sided_w = torch.pow(1 - pt, one_sided_gamma) if self.disable_torch_grad_focal_loss: torch._C.set_grad_enabled(True) loss *= one_sided_w return -loss.sum() class AsymmetricLossSingleLabel(nn.Module): def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'): super(AsymmetricLossSingleLabel, self).__init__() self.eps = eps self.logsoftmax = nn.LogSoftmax(dim=-1) self.targets_classes = [] # prevent gpu repeated memory allocation self.gamma_pos = gamma_pos self.gamma_neg = gamma_neg self.reduction = reduction def forward(self, inputs, target, reduction=None): """" Parameters ---------- x: input logits y: targets (1-hot vector) """ num_classes = inputs.size()[-1] log_preds = self.logsoftmax(inputs) self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1) # ASL weights targets = self.targets_classes anti_targets = 1 - targets xs_pos = torch.exp(log_preds) xs_neg = 1 - xs_pos xs_pos = xs_pos * targets xs_neg = xs_neg * anti_targets asymmetric_w = torch.pow(1 - xs_pos - xs_neg, self.gamma_pos * targets + self.gamma_neg * anti_targets) log_preds = log_preds * asymmetric_w if self.eps > 0: # label smoothing self.targets_classes = self.targets_classes.mul(1 - self.eps).add(self.eps / num_classes) # loss calculation loss = - self.targets_classes.mul(log_preds) loss = loss.sum(dim=-1) if self.reduction == 'mean': loss = loss.mean() return loss
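# --- Added usage sketch (not part of the module above) ---
# Shapes and class counts below are illustrative. Note the multi-label variant returns the
# summed (not mean) loss over the batch, matching the implementation above.
import torch
from timm.loss import AsymmetricLossMultiLabel

criterion = AsymmetricLossMultiLabel(gamma_neg=4, gamma_pos=1, clip=0.05)
logits = torch.randn(8, 5, requires_grad=True)   # (batch, num_labels)
targets = torch.randint(0, 2, (8, 5)).float()    # multi-hot targets
loss = criterion(logits, targets)
loss.backward()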
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/__init__.py
from .asymmetric_loss import AsymmetricLossMultiLabel, AsymmetricLossSingleLabel from .binary_cross_entropy import BinaryCrossEntropy from .cross_entropy import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from .jsd import JsdCrossEntropy
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/binary_cross_entropy.py
""" Binary Cross Entropy w/ a few extras Hacked together by / Copyright 2021 Ross Wightman """ from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F class BinaryCrossEntropy(nn.Module): """ BCE with optional one-hot from dense targets, label smoothing, thresholding NOTE for experiments comparing CE to BCE /w label smoothing, may remove """ def __init__( self, smoothing=0.1, target_threshold: Optional[float] = None, weight: Optional[torch.Tensor] = None, reduction: str = 'mean', pos_weight: Optional[torch.Tensor] = None): super(BinaryCrossEntropy, self).__init__() assert 0. <= smoothing < 1.0 self.smoothing = smoothing self.target_threshold = target_threshold self.reduction = reduction self.register_buffer('weight', weight) self.register_buffer('pos_weight', pos_weight) def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: assert x.shape[0] == target.shape[0] if target.shape != x.shape: # NOTE currently assume smoothing or other label softening is applied upstream if targets are already sparse num_classes = x.shape[-1] # FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ off_value = self.smoothing / num_classes on_value = 1. - self.smoothing + off_value target = target.long().view(-1, 1) target = torch.full( (target.size()[0], num_classes), off_value, device=x.device, dtype=x.dtype).scatter_(1, target, on_value) if self.target_threshold is not None: # Make target 0, or 1 if threshold set target = target.gt(self.target_threshold).to(dtype=target.dtype) return F.binary_cross_entropy_with_logits( x, target, self.weight, pos_weight=self.pos_weight, reduction=self.reduction)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/jsd.py
import torch import torch.nn as nn import torch.nn.functional as F from .cross_entropy import LabelSmoothingCrossEntropy class JsdCrossEntropy(nn.Module): """ Jensen-Shannon Divergence + Cross-Entropy Loss Based on impl here: https://github.com/google-research/augmix/blob/master/imagenet.py From paper: 'AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty - https://arxiv.org/abs/1912.02781 Hacked together by / Copyright 2020 Ross Wightman """ def __init__(self, num_splits=3, alpha=12, smoothing=0.1): super().__init__() self.num_splits = num_splits self.alpha = alpha if smoothing is not None and smoothing > 0: self.cross_entropy_loss = LabelSmoothingCrossEntropy(smoothing) else: self.cross_entropy_loss = torch.nn.CrossEntropyLoss() def __call__(self, output, target): split_size = output.shape[0] // self.num_splits assert split_size * self.num_splits == output.shape[0] logits_split = torch.split(output, split_size) # Cross-entropy is only computed on clean images loss = self.cross_entropy_loss(logits_split[0], target[:split_size]) probs = [F.softmax(logits, dim=1) for logits in logits_split] # Clamp mixture distribution to avoid exploding KL divergence logp_mixture = torch.clamp(torch.stack(probs).mean(axis=0), 1e-7, 1).log() loss += self.alpha * sum([F.kl_div( logp_mixture, p_split, reduction='batchmean') for p_split in probs]) / len(probs) return loss
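# --- Added usage sketch (not part of the module above) ---
# AugMix-style setup: the batch is assumed to be [clean | aug1 | aug2] splits stacked along
# dim 0, and cross-entropy is computed only on the clean split, as in the code above.
import torch
from timm.loss import JsdCrossEntropy

split = 4
output = torch.randn(3 * split, 10, requires_grad=True)  # clean + two augmented views
target = torch.randint(0, 10, (3 * split,))
criterion = JsdCrossEntropy(num_splits=3, alpha=12, smoothing=0.1)
loss = criterion(output, target)
loss.backward()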
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/loss/cross_entropy.py
""" Cross Entropy w/ smoothing or soft targets Hacked together by / Copyright 2021 Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F class LabelSmoothingCrossEntropy(nn.Module): """ NLL loss with label smoothing. """ def __init__(self, smoothing=0.1): super(LabelSmoothingCrossEntropy, self).__init__() assert smoothing < 1.0 self.smoothing = smoothing self.confidence = 1. - smoothing def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: logprobs = F.log_softmax(x, dim=-1) nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) nll_loss = nll_loss.squeeze(1) smooth_loss = -logprobs.mean(dim=-1) loss = self.confidence * nll_loss + self.smoothing * smooth_loss return loss.mean() class SoftTargetCrossEntropy(nn.Module): def __init__(self): super(SoftTargetCrossEntropy, self).__init__() def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) return loss.mean()
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/distributed.py
""" Distributed training/validation utils Hacked together by / Copyright 2020 Ross Wightman """ import os import torch from torch import distributed as dist try: import horovod.torch as hvd except ImportError: hvd = None from .model import unwrap_model def reduce_tensor(tensor, n): rt = tensor.clone() dist.all_reduce(rt, op=dist.ReduceOp.SUM) rt /= n return rt def distribute_bn(model, world_size, reduce=False): # ensure every node has the same running bn stats for bn_name, bn_buf in unwrap_model(model).named_buffers(recurse=True): if ('running_mean' in bn_name) or ('running_var' in bn_name): if reduce: # average bn stats across whole group torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM) bn_buf /= float(world_size) else: # broadcast bn stats from rank 0 to whole group torch.distributed.broadcast(bn_buf, 0) def is_global_primary(args): return args.rank == 0 def is_local_primary(args): return args.local_rank == 0 def is_primary(args, local=False): return is_local_primary(args) if local else is_global_primary(args) def is_distributed_env(): if 'WORLD_SIZE' in os.environ: return int(os.environ['WORLD_SIZE']) > 1 if 'SLURM_NTASKS' in os.environ: return int(os.environ['SLURM_NTASKS']) > 1 return False def world_info_from_env(): local_rank = 0 for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'): if v in os.environ: local_rank = int(os.environ[v]) break global_rank = 0 for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'): if v in os.environ: global_rank = int(os.environ[v]) break world_size = 1 for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'): if v in os.environ: world_size = int(os.environ[v]) break return local_rank, global_rank, world_size def init_distributed_device(args): # Distributed training = training on more than one GPU. # Works in both single and multi-node scenarios. args.distributed = False args.world_size = 1 args.rank = 0 # global rank args.local_rank = 0 # TBD, support horovod? # if args.horovod: # assert hvd is not None, "Horovod is not installed" # hvd.init() # args.local_rank = int(hvd.local_rank()) # args.rank = hvd.rank() # args.world_size = hvd.size() # args.distributed = True # os.environ['LOCAL_RANK'] = str(args.local_rank) # os.environ['RANK'] = str(args.rank) # os.environ['WORLD_SIZE'] = str(args.world_size) dist_backend = getattr(args, 'dist_backend', 'nccl') dist_url = getattr(args, 'dist_url', 'env://') if is_distributed_env(): if 'SLURM_PROCID' in os.environ: # DDP via SLURM args.local_rank, args.rank, args.world_size = world_info_from_env() # SLURM var -> torch.distributed vars in case needed os.environ['LOCAL_RANK'] = str(args.local_rank) os.environ['RANK'] = str(args.rank) os.environ['WORLD_SIZE'] = str(args.world_size) torch.distributed.init_process_group( backend=dist_backend, init_method=dist_url, world_size=args.world_size, rank=args.rank, ) else: # DDP via torchrun, torch.distributed.launch args.local_rank, _, _ = world_info_from_env() torch.distributed.init_process_group( backend=dist_backend, init_method=dist_url, ) args.world_size = torch.distributed.get_world_size() args.rank = torch.distributed.get_rank() args.distributed = True if torch.cuda.is_available(): if args.distributed: device = 'cuda:%d' % args.local_rank else: device = 'cuda:0' torch.cuda.set_device(device) else: device = 'cpu' args.device = device device = torch.device(device) return device
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/__init__.py
from .agc import adaptive_clip_grad from .checkpoint_saver import CheckpointSaver from .clip_grad import dispatch_clip_grad from .cuda import ApexScaler, NativeScaler from .decay_batch import decay_batch_step, check_batch_size_retry from .distributed import distribute_bn, reduce_tensor, init_distributed_device,\ world_info_from_env, is_distributed_env, is_primary from .jit import set_jit_legacy, set_jit_fuser from .log import setup_default_logging, FormatterNoInfo from .metrics import AverageMeter, accuracy from .misc import natural_key, add_bool_arg, ParseKwargs from .model import unwrap_model, get_state_dict, freeze, unfreeze from .model_ema import ModelEma, ModelEmaV2 from .random import random_seed from .summary import update_summary, get_outdir
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/model.py
""" Model / state_dict utils Hacked together by / Copyright 2020 Ross Wightman """ import fnmatch import torch from torchvision.ops.misc import FrozenBatchNorm2d from timm.layers import BatchNormAct2d, SyncBatchNormAct, FrozenBatchNormAct2d,\ freeze_batch_norm_2d, unfreeze_batch_norm_2d from .model_ema import ModelEma def unwrap_model(model): if isinstance(model, ModelEma): return unwrap_model(model.ema) else: return model.module if hasattr(model, 'module') else model def get_state_dict(model, unwrap_fn=unwrap_model): return unwrap_fn(model).state_dict() def avg_sq_ch_mean(model, input, output): """ calculate average channel square mean of output activations """ return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item() def avg_ch_var(model, input, output): """ calculate average channel variance of output activations """ return torch.mean(output.var(axis=[0, 2, 3])).item() def avg_ch_var_residual(model, input, output): """ calculate average channel variance of output activations """ return torch.mean(output.var(axis=[0, 2, 3])).item() class ActivationStatsHook: """Iterates through each of `model`'s modules and matches modules using unix pattern matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is a match. Arguments: model (nn.Module): model from which we will extract the activation stats hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string matching with the name of model's modules. hook_fns (List[Callable]): List of hook functions to be registered at every module in `layer_names`. Inspiration from https://docs.fast.ai/callback.hook.html. Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example on how to plot Signal Propogation Plots using `ActivationStatsHook`. """ def __init__(self, model, hook_fn_locs, hook_fns): self.model = model self.hook_fn_locs = hook_fn_locs self.hook_fns = hook_fns if len(hook_fn_locs) != len(hook_fns): raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ their lengths are different.") self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): self.register_hook(hook_fn_loc, hook_fn) def _create_hook(self, hook_fn): def append_activation_stats(module, input, output): out = hook_fn(module, input, output) self.stats[hook_fn.__name__].append(out) return append_activation_stats def register_hook(self, hook_fn_loc, hook_fn): for name, module in self.model.named_modules(): if not fnmatch.fnmatch(name, hook_fn_loc): continue module.register_forward_hook(self._create_hook(hook_fn)) def extract_spp_stats( model, hook_fn_locs, hook_fns, input_shape=[8, 3, 224, 224]): """Extract average square channel mean and variance of activations during forward pass to plot Signal Propogation Plots (SPP). Paper: https://arxiv.org/abs/2101.08692 Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 """ x = torch.normal(0., 1., input_shape) hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) _ = model(x) return hook.stats def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True, mode='freeze'): """ Freeze or unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module, optional): Root module relative to which the `submodules` are referenced. submodules (list[str]): List of modules for which the parameters will be (un)frozen. 
They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be (un)frozen. Defaults to [] include_bn_running_stats (bool): Whether to also (un)freeze the running statistics of batch norm 2d layers. Defaults to `True`. mode (bool): Whether to freeze ("freeze") or unfreeze ("unfreeze"). Defaults to `"freeze"`. """ assert mode in ["freeze", "unfreeze"], '`mode` must be one of "freeze" or "unfreeze"' if isinstance(root_module, ( torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm, BatchNormAct2d, SyncBatchNormAct, )): # Raise assertion here because we can't convert it in place raise AssertionError( "You have provided a batch norm layer as the `root module`. Please use " "`timm.utils.model.freeze_batch_norm_2d` or `timm.utils.model.unfreeze_batch_norm_2d` instead.") if isinstance(submodules, str): submodules = [submodules] named_modules = submodules submodules = [root_module.get_submodule(m) for m in submodules] if not len(submodules): named_modules, submodules = list(zip(*root_module.named_children())) for n, m in zip(named_modules, submodules): # (Un)freeze parameters for p in m.parameters(): p.requires_grad = False if mode == 'freeze' else True if include_bn_running_stats: # Helper to add submodule specified as a named_module def _add_submodule(module, name, submodule): split = name.rsplit('.', 1) if len(split) > 1: module.get_submodule(split[0]).add_module(split[1], submodule) else: module.add_module(name, submodule) # Freeze batch norm if mode == 'freeze': res = freeze_batch_norm_2d(m) # It's possible that `m` is a type of BatchNorm in itself, in which case `unfreeze_batch_norm_2d` won't # convert it in place, but will return the converted result. In this case `res` holds the converted # result and we may try to re-assign the named module if isinstance(m, ( torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm, BatchNormAct2d, SyncBatchNormAct, )): _add_submodule(root_module, n, res) # Unfreeze batch norm else: res = unfreeze_batch_norm_2d(m) # Ditto. See note above in mode == 'freeze' branch if isinstance(m, (FrozenBatchNorm2d, FrozenBatchNormAct2d)): _add_submodule(root_module, n, res) def freeze(root_module, submodules=[], include_bn_running_stats=True): """ Freeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module): Root module relative to which `submodules` are referenced. submodules (list[str]): List of modules for which the parameters will be frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be frozen. Defaults to `[]`. include_bn_running_stats (bool): Whether to also freeze the running statistics of `BatchNorm2d` and `SyncBatchNorm` layers. These will be converted to `FrozenBatchNorm2d` in place. Hint: During fine tuning, it's good practice to freeze batch norm stats. And note that these are different to the affine parameters which are just normal PyTorch parameters. Defaults to `True`. Hint: If you want to freeze batch norm ONLY, use `timm.utils.model.freeze_batch_norm_2d`. 
Examples:: >>> model = timm.create_model('resnet18') >>> # Freeze up to and including layer2 >>> submodules = [n for n, _ in model.named_children()] >>> print(submodules) ['conv1', 'bn1', 'act1', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'global_pool', 'fc'] >>> freeze(model, submodules[:submodules.index('layer2') + 1]) >>> # Check for yourself that it works as expected >>> print(model.layer2[0].conv1.weight.requires_grad) False >>> print(model.layer3[0].conv1.weight.requires_grad) True >>> # Unfreeze >>> unfreeze(model) """ _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="freeze") def unfreeze(root_module, submodules=[], include_bn_running_stats=True): """ Unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module): Root module relative to which `submodules` are referenced. submodules (list[str]): List of submodules for which the parameters will be (un)frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be unfrozen. Defaults to `[]`. include_bn_running_stats (bool): Whether to also unfreeze the running statistics of `FrozenBatchNorm2d` layers. These will be converted to `BatchNorm2d` in place. Defaults to `True`. See example in docstring for `freeze`. """ _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze")
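# --- Added usage sketch (not part of the module above) ---
# A short example of the SPP hook utilities defined above; the fnmatch patterns and input
# shape are illustrative assumptions based on timm's ResNet module naming.
import timm
from timm.utils.model import extract_spp_stats, avg_sq_ch_mean, avg_ch_var

model = timm.create_model('resnet18')
stats = extract_spp_stats(
    model,
    hook_fn_locs=['layer?.?.conv?', 'layer?.?.conv?'],  # same locations for both hook fns
    hook_fns=[avg_sq_ch_mean, avg_ch_var],
    input_shape=[2, 3, 224, 224],
)
print(list(stats.keys()))        # ['avg_sq_ch_mean', 'avg_ch_var']
print(len(stats['avg_ch_var']))  # one entry per matched conv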
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/jit.py
""" JIT scripting/tracing utils Hacked together by / Copyright 2020 Ross Wightman """ import os import torch def set_jit_legacy(): """ Set JIT executor to legacy w/ support for op fusion This is hopefully a temporary need in 1.5/1.5.1/1.6 to restore performance due to changes in the JIT exectutor. These API are not supported so could change. """ # assert hasattr(torch._C, '_jit_set_profiling_executor'), "Old JIT behavior doesn't exist!" torch._C._jit_set_profiling_executor(False) torch._C._jit_set_profiling_mode(False) torch._C._jit_override_can_fuse_on_gpu(True) #torch._C._jit_set_texpr_fuser_enabled(True) def set_jit_fuser(fuser): if fuser == "te": # default fuser should be == 'te' torch._C._jit_set_profiling_executor(True) torch._C._jit_set_profiling_mode(True) torch._C._jit_override_can_fuse_on_cpu(False) torch._C._jit_override_can_fuse_on_gpu(True) torch._C._jit_set_texpr_fuser_enabled(True) try: torch._C._jit_set_nvfuser_enabled(False) except Exception: pass elif fuser == "old" or fuser == "legacy": torch._C._jit_set_profiling_executor(False) torch._C._jit_set_profiling_mode(False) torch._C._jit_override_can_fuse_on_gpu(True) torch._C._jit_set_texpr_fuser_enabled(False) try: torch._C._jit_set_nvfuser_enabled(False) except Exception: pass elif fuser == "nvfuser" or fuser == "nvf": os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1' #os.environ['PYTORCH_NVFUSER_DISABLE_FMA'] = '1' #os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0' torch._C._jit_set_texpr_fuser_enabled(False) torch._C._jit_set_profiling_executor(True) torch._C._jit_set_profiling_mode(True) torch._C._jit_can_fuse_on_cpu() torch._C._jit_can_fuse_on_gpu() torch._C._jit_override_can_fuse_on_cpu(False) torch._C._jit_override_can_fuse_on_gpu(False) torch._C._jit_set_nvfuser_guard_mode(True) torch._C._jit_set_nvfuser_enabled(True) else: assert False, f"Invalid jit fuser ({fuser})"
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/metrics.py
""" Eval metrics and related Hacked together by / Copyright 2020 Ross Wightman """ class AverageMeter: """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" maxk = min(max(topk), output.size()[1]) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.reshape(1, -1).expand_as(pred)) return [correct[:min(k, maxk)].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/clip_grad.py
import torch from timm.utils.agc import adaptive_clip_grad def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): """ Dispatch to gradient clipping method Args: parameters (Iterable): model parameters to clip value (float): clipping value/factor/norm, mode dependant mode (str): clipping mode, one of 'norm', 'value', 'agc' norm_type (float): p-norm, default 2.0 """ if mode == 'norm': torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) elif mode == 'value': torch.nn.utils.clip_grad_value_(parameters, value) elif mode == 'agc': adaptive_clip_grad(parameters, value, norm_type=norm_type) else: assert False, f"Unknown clip mode ({mode})."
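# --- Added training-step sketch (not part of the module above) ---
# Clipping is dispatched after backward and before the optimizer step; mode='agc' routes to
# the adaptive clipping implemented in timm.utils.agc. The toy model below is illustrative.
import torch
from timm.utils import dispatch_clip_grad

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

loss = model(torch.randn(4, 10)).sum()
loss.backward()
dispatch_clip_grad(model.parameters(), value=1.0, mode='norm')  # or 'value' / 'agc'
optimizer.step()
optimizer.zero_grad()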
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/summary.py
""" Summary utilities Hacked together by / Copyright 2020 Ross Wightman """ import csv import os from collections import OrderedDict try: import wandb except ImportError: pass def get_outdir(path, *paths, inc=False): outdir = os.path.join(path, *paths) if not os.path.exists(outdir): os.makedirs(outdir) elif inc: count = 1 outdir_inc = outdir + '-' + str(count) while os.path.exists(outdir_inc): count = count + 1 outdir_inc = outdir + '-' + str(count) assert count < 100 outdir = outdir_inc os.makedirs(outdir) return outdir def update_summary( epoch, train_metrics, eval_metrics, filename, lr=None, write_header=False, log_wandb=False, ): rowd = OrderedDict(epoch=epoch) rowd.update([('train_' + k, v) for k, v in train_metrics.items()]) rowd.update([('eval_' + k, v) for k, v in eval_metrics.items()]) if lr is not None: rowd['lr'] = lr if log_wandb: wandb.log(rowd) with open(filename, mode='a') as cf: dw = csv.DictWriter(cf, fieldnames=rowd.keys()) if write_header: # first iteration (epoch == 1 can't be used) dw.writeheader() dw.writerow(rowd)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/model_ema.py
""" Exponential Moving Average (EMA) of model updates Hacked together by / Copyright 2020 Ross Wightman """ import logging from collections import OrderedDict from copy import deepcopy import torch import torch.nn as nn _logger = logging.getLogger(__name__) class ModelEma: """ Model Exponential Moving Average (DEPRECATED) Keep a moving average of everything in the model state_dict (parameters and buffers). This version is deprecated, it does not work with scripted models. Will be removed eventually. This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage A smoothed version of the weights is necessary for some training schemes to perform well. E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA smoothing of weights to match results. Pay attention to the decay constant you are using relative to your update count per epoch. To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but disable validation of the EMA weights. Validation will have to be done manually in a separate process, or after the training stops converging. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. """ def __init__(self, model, decay=0.9999, device='', resume=''): # make a copy of the model for accumulating moving average of weights self.ema = deepcopy(model) self.ema.eval() self.decay = decay self.device = device # perform ema on different device from model if set if device: self.ema.to(device=device) self.ema_has_module = hasattr(self.ema, 'module') if resume: self._load_checkpoint(resume) for p in self.ema.parameters(): p.requires_grad_(False) def _load_checkpoint(self, checkpoint_path): checkpoint = torch.load(checkpoint_path, map_location='cpu') assert isinstance(checkpoint, dict) if 'state_dict_ema' in checkpoint: new_state_dict = OrderedDict() for k, v in checkpoint['state_dict_ema'].items(): # ema model may have been wrapped by DataParallel, and need module prefix if self.ema_has_module: name = 'module.' + k if not k.startswith('module') else k else: name = k new_state_dict[name] = v self.ema.load_state_dict(new_state_dict) _logger.info("Loaded state_dict_ema") else: _logger.warning("Failed to find state_dict_ema, starting from loaded model weights") def update(self, model): # correct a mismatch in state dict keys needs_module = hasattr(model, 'module') and not self.ema_has_module with torch.no_grad(): msd = model.state_dict() for k, ema_v in self.ema.state_dict().items(): if needs_module: k = 'module.' + k model_v = msd[k].detach() if self.device: model_v = model_v.to(device=self.device) ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v) class ModelEmaV2(nn.Module): """ Model Exponential Moving Average V2 Keep a moving average of everything in the model state_dict (parameters and buffers). V2 of this module is simpler, it does not match params/buffers based on name but simply iterates in order. It works with torchscript (JIT of full model). This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage A smoothed version of the weights is necessary for some training schemes to perform well. E.g. 
Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA smoothing of weights to match results. Pay attention to the decay constant you are using relative to your update count per epoch. To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but disable validation of the EMA weights. Validation will have to be done manually in a separate process, or after the training stops converging. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. """ def __init__(self, model, decay=0.9999, device=None): super(ModelEmaV2, self).__init__() # make a copy of the model for accumulating moving average of weights self.module = deepcopy(model) self.module.eval() self.decay = decay self.device = device # perform ema on different device from model if set if self.device is not None: self.module.to(device=device) def _update(self, model, update_fn): with torch.no_grad(): for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()): if self.device is not None: model_v = model_v.to(device=self.device) ema_v.copy_(update_fn(ema_v, model_v)) def update(self, model): self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m) def set(self, model): self._update(model, update_fn=lambda e, m: m)
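# --- Added training-loop sketch (not part of the module above) ---
# ModelEmaV2.update() is called after each optimizer step; the smoothed weights live on
# ema.module. The toy model and loop below are illustrative.
import torch
from timm.utils import ModelEmaV2

model = torch.nn.Linear(10, 2)
ema = ModelEmaV2(model, decay=0.999)  # pass device='cpu' to keep the EMA copy off the GPU
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for _ in range(3):
    optimizer.zero_grad()
    model(torch.randn(4, 10)).sum().backward()
    optimizer.step()
    ema.update(model)

# validate / save with the smoothed copy
ema_state = ema.module.state_dict()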
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/decay_batch.py
""" Batch size decay and retry helpers. Copyright 2022 Ross Wightman """ import math def decay_batch_step(batch_size, num_intra_steps=2, no_odd=False): """ power of two batch-size decay with intra steps Decay by stepping between powers of 2: * determine power-of-2 floor of current batch size (base batch size) * divide above value by num_intra_steps to determine step size * floor batch_size to nearest multiple of step_size (from base batch size) Examples: num_steps == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1 num_steps (no_odd=True) == 4 --> 64, 56, 48, 40, 32, 28, 24, 20, 16, 14, 12, 10, 8, 6, 4, 2 num_steps == 2 --> 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1 num_steps == 1 --> 64, 32, 16, 8, 4, 2, 1 """ if batch_size <= 1: # return 0 for stopping value so easy to use in loop return 0 base_batch_size = int(2 ** (math.log(batch_size - 1) // math.log(2))) step_size = max(base_batch_size // num_intra_steps, 1) batch_size = base_batch_size + ((batch_size - base_batch_size - 1) // step_size) * step_size if no_odd and batch_size % 2: batch_size -= 1 return batch_size def check_batch_size_retry(error_str): """ check failure error string for conditions where batch decay retry should not be attempted """ error_str = error_str.lower() if 'required rank' in error_str: # Errors involving phrase 'required rank' typically happen when a conv is used that's # not compatible with channels_last memory format. return False if 'illegal' in error_str: # 'Illegal memory access' errors in CUDA typically leave process in unusable state return False return True
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/agc.py
""" Adaptive Gradient Clipping An impl of AGC, as per (https://arxiv.org/abs/2102.06171): @article{brock2021high, author={Andrew Brock and Soham De and Samuel L. Smith and Karen Simonyan}, title={High-Performance Large-Scale Image Recognition Without Normalization}, journal={arXiv preprint arXiv:}, year={2021} } Code references: * Official JAX impl (paper authors): https://github.com/deepmind/deepmind-research/tree/master/nfnets * Phil Wang's PyTorch gist: https://gist.github.com/lucidrains/0d6560077edac419ab5d3aa29e674d5c Hacked together by / Copyright 2021 Ross Wightman """ import torch def unitwise_norm(x, norm_type=2.0): if x.ndim <= 1: return x.norm(norm_type) else: # works for nn.ConvNd and nn,Linear where output dim is first in the kernel/weight tensor # might need special cases for other weights (possibly MHA) where this may not be true return x.norm(norm_type, dim=tuple(range(1, x.ndim)), keepdim=True) def adaptive_clip_grad(parameters, clip_factor=0.01, eps=1e-3, norm_type=2.0): if isinstance(parameters, torch.Tensor): parameters = [parameters] for p in parameters: if p.grad is None: continue p_data = p.detach() g_data = p.grad.detach() max_norm = unitwise_norm(p_data, norm_type=norm_type).clamp_(min=eps).mul_(clip_factor) grad_norm = unitwise_norm(g_data, norm_type=norm_type) clipped_grad = g_data * (max_norm / grad_norm.clamp(min=1e-6)) new_grads = torch.where(grad_norm < max_norm, g_data, clipped_grad) p.grad.detach().copy_(new_grads)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/onnx.py
from typing import Optional, Tuple, List import torch def onnx_forward(onnx_file, example_input): import onnxruntime sess_options = onnxruntime.SessionOptions() session = onnxruntime.InferenceSession(onnx_file, sess_options) input_name = session.get_inputs()[0].name output = session.run([], {input_name: example_input.numpy()}) output = output[0] return output def onnx_export( model: torch.nn.Module, output_file: str, example_input: Optional[torch.Tensor] = None, training: bool = False, verbose: bool = False, check: bool = True, check_forward: bool = False, batch_size: int = 64, input_size: Tuple[int, int, int] = None, opset: Optional[int] = None, dynamic_size: bool = False, aten_fallback: bool = False, keep_initializers: Optional[bool] = None, input_names: List[str] = None, output_names: List[str] = None, ): import onnx if training: training_mode = torch.onnx.TrainingMode.TRAINING model.train() else: training_mode = torch.onnx.TrainingMode.EVAL model.eval() if example_input is None: if not input_size: assert hasattr(model, 'default_cfg') input_size = model.default_cfg.get('input_size') example_input = torch.randn((batch_size,) + input_size, requires_grad=training) # Run model once before export trace, sets padding for models with Conv2dSameExport. This means # that the padding for models with Conv2dSameExport (most models with tf_ prefix) is fixed for # the input img_size specified in this script. # Opset >= 11 should allow for dynamic padding, however I cannot get it to work due to # issues in the tracing of the dynamic padding or errors attempting to export the model after jit # scripting it (an approach that should work). Perhaps in a future PyTorch or ONNX versions... original_out = model(example_input) input_names = input_names or ["input0"] output_names = output_names or ["output0"] dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}} if dynamic_size: dynamic_axes['input0'][2] = 'height' dynamic_axes['input0'][3] = 'width' if aten_fallback: export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK else: export_type = torch.onnx.OperatorExportTypes.ONNX torch_out = torch.onnx._export( model, example_input, output_file, training=training_mode, export_params=True, verbose=verbose, input_names=input_names, output_names=output_names, keep_initializers_as_inputs=keep_initializers, dynamic_axes=dynamic_axes, opset_version=opset, operator_export_type=export_type ) if check: onnx_model = onnx.load(output_file) onnx.checker.check_model(onnx_model, full_check=True) # assuming throw on error if check_forward and not training: import numpy as np onnx_out = onnx_forward(output_file, example_input) np.testing.assert_almost_equal(torch_out.data.numpy(), onnx_out, decimal=3) np.testing.assert_almost_equal(original_out.data.numpy(), torch_out.data.numpy(), decimal=5)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/checkpoint_saver.py
""" Checkpoint Saver Track top-n training checkpoints and maintain recovery checkpoints on specified intervals. Hacked together by / Copyright 2020 Ross Wightman """ import glob import operator import os import logging import torch from .model import unwrap_model, get_state_dict _logger = logging.getLogger(__name__) class CheckpointSaver: def __init__( self, model, optimizer, args=None, model_ema=None, amp_scaler=None, checkpoint_prefix='checkpoint', recovery_prefix='recovery', checkpoint_dir='', recovery_dir='', decreasing=False, max_history=10, unwrap_fn=unwrap_model): # objects to save state_dicts of self.model = model self.optimizer = optimizer self.args = args self.model_ema = model_ema self.amp_scaler = amp_scaler # state self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness self.best_epoch = None self.best_metric = None self.curr_recovery_file = '' self.last_recovery_file = '' # config self.checkpoint_dir = checkpoint_dir self.recovery_dir = recovery_dir self.save_prefix = checkpoint_prefix self.recovery_prefix = recovery_prefix self.extension = '.pth.tar' self.decreasing = decreasing # a lower metric is better if True self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs self.max_history = max_history self.unwrap_fn = unwrap_fn assert self.max_history >= 1 def save_checkpoint(self, epoch, metric=None): assert epoch >= 0 tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension) last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension) self._save(tmp_save_path, epoch, metric) if os.path.exists(last_save_path): os.unlink(last_save_path) # required for Windows support. os.rename(tmp_save_path, last_save_path) worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None if (len(self.checkpoint_files) < self.max_history or metric is None or self.cmp(metric, worst_file[1])): if len(self.checkpoint_files) >= self.max_history: self._cleanup_checkpoints(1) filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension save_path = os.path.join(self.checkpoint_dir, filename) os.link(last_save_path, save_path) self.checkpoint_files.append((save_path, metric)) self.checkpoint_files = sorted( self.checkpoint_files, key=lambda x: x[1], reverse=not self.decreasing) # sort in descending order if a lower metric is not better checkpoints_str = "Current checkpoints:\n" for c in self.checkpoint_files: checkpoints_str += ' {}\n'.format(c) _logger.info(checkpoints_str) if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)): self.best_epoch = epoch self.best_metric = metric best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension) if os.path.exists(best_save_path): os.unlink(best_save_path) os.link(last_save_path, best_save_path) return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch) def _save(self, save_path, epoch, metric=None): save_state = { 'epoch': epoch, 'arch': type(self.model).__name__.lower(), 'state_dict': get_state_dict(self.model, self.unwrap_fn), 'optimizer': self.optimizer.state_dict(), 'version': 2, # version < 2 increments epoch before save } if self.args is not None: save_state['arch'] = self.args.model save_state['args'] = self.args if self.amp_scaler is not None: save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict() if self.model_ema is not None: save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn) if metric is not None: 
save_state['metric'] = metric torch.save(save_state, save_path) def _cleanup_checkpoints(self, trim=0): trim = min(len(self.checkpoint_files), trim) delete_index = self.max_history - trim if delete_index < 0 or len(self.checkpoint_files) <= delete_index: return to_delete = self.checkpoint_files[delete_index:] for d in to_delete: try: _logger.debug("Cleaning checkpoint: {}".format(d)) os.remove(d[0]) except Exception as e: _logger.error("Exception '{}' while deleting checkpoint".format(e)) self.checkpoint_files = self.checkpoint_files[:delete_index] def save_recovery(self, epoch, batch_idx=0): assert epoch >= 0 filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension save_path = os.path.join(self.recovery_dir, filename) self._save(save_path, epoch) if os.path.exists(self.last_recovery_file): try: _logger.debug("Cleaning recovery: {}".format(self.last_recovery_file)) os.remove(self.last_recovery_file) except Exception as e: _logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file)) self.last_recovery_file = self.curr_recovery_file self.curr_recovery_file = save_path def find_recovery(self): recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix) files = glob.glob(recovery_path + '*' + self.extension) files = sorted(files) return files[0] if len(files) else ''
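# Usage sketch (an illustrative assumption, not part of this module): keep the top-n
# checkpoints ranked by an eval metric. The metric values are stand-ins for a real
# validation score such as top-1 accuracy, and a fresh output directory is assumed.
import os
import torch
import torch.nn as nn

os.makedirs('./output', exist_ok=True)
model = nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
saver = CheckpointSaver(model, optimizer, checkpoint_dir='./output', decreasing=False, max_history=3)

for epoch, metric in enumerate([70.1, 72.4, 71.9, 73.0]):
    # ... train and validate for one epoch here ...
    best_metric, best_epoch = saver.save_checkpoint(epoch, metric=metric)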
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/random.py
import random import numpy as np import torch def random_seed(seed=42, rank=0): torch.manual_seed(seed + rank) np.random.seed(seed + rank) random.seed(seed + rank)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/cuda.py
""" CUDA / AMP utils Hacked together by / Copyright 2020 Ross Wightman """ import torch try: from apex import amp has_apex = True except ImportError: amp = None has_apex = False from .clip_grad import dispatch_clip_grad class ApexScaler: state_dict_key = "amp" def __call__( self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False, need_update=True, ): with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward(create_graph=create_graph) if need_update: if clip_grad is not None: dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode) optimizer.step() def state_dict(self): if 'state_dict' in amp.__dict__: return amp.state_dict() def load_state_dict(self, state_dict): if 'load_state_dict' in amp.__dict__: amp.load_state_dict(state_dict) class NativeScaler: state_dict_key = "amp_scaler" def __init__(self): self._scaler = torch.cuda.amp.GradScaler() def __call__( self, loss, optimizer, clip_grad=None, clip_mode='norm', parameters=None, create_graph=False, need_update=True, ): self._scaler.scale(loss).backward(create_graph=create_graph) if need_update: if clip_grad is not None: assert parameters is not None self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place dispatch_clip_grad(parameters, clip_grad, mode=clip_mode) self._scaler.step(optimizer) self._scaler.update() def state_dict(self): return self._scaler.state_dict() def load_state_dict(self, state_dict): self._scaler.load_state_dict(state_dict)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/misc.py
""" Misc utils Hacked together by / Copyright 2020 Ross Wightman """ import argparse import ast import re def natural_key(string_): """See http://www.codinghorror.com/blog/archives/001018.html""" return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] def add_bool_arg(parser, name, default=False, help=''): dest_name = name.replace('-', '_') group = parser.add_mutually_exclusive_group(required=False) group.add_argument('--' + name, dest=dest_name, action='store_true', help=help) group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help) parser.set_defaults(**{dest_name: default}) class ParseKwargs(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): kw = {} for value in values: key, value = value.split('=') try: kw[key] = ast.literal_eval(value) except ValueError: kw[key] = str(value) # fallback to string (avoid need to escape on command line) setattr(namespace, self.dest, kw)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/utils/log.py
""" Logging helpers Hacked together by / Copyright 2020 Ross Wightman """ import logging import logging.handlers class FormatterNoInfo(logging.Formatter): def __init__(self, fmt='%(levelname)s: %(message)s'): logging.Formatter.__init__(self, fmt) def format(self, record): if record.levelno == logging.INFO: return str(record.getMessage()) return logging.Formatter.format(self, record) def setup_default_logging(default_level=logging.INFO, log_path=''): console_handler = logging.StreamHandler() console_handler.setFormatter(FormatterNoInfo()) logging.root.addHandler(console_handler) logging.root.setLevel(default_level) if log_path: file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3) file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s") file_handler.setFormatter(file_formatter) logging.root.addHandler(file_handler)
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/poly_lr.py
""" Polynomial Scheduler Polynomial LR schedule with warmup, noise. Hacked together by / Copyright 2021 Ross Wightman """ import math import logging import torch from .scheduler import Scheduler _logger = logging.getLogger(__name__) class PolyLRScheduler(Scheduler): """ Polynomial LR Scheduler w/ warmup, noise, and k-decay k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 """ def __init__( self, optimizer: torch.optim.Optimizer, t_initial: int, power: float = 0.5, lr_min: float = 0., cycle_mul: float = 1., cycle_decay: float = 1., cycle_limit: int = 1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, k_decay=1.0, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize ) assert t_initial > 0 assert lr_min >= 0 if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: _logger.warning("Cosine annealing scheduler will have no effect on the learning " "rate since t_initial = t_mul = eta_mul = 1.") self.t_initial = t_initial self.power = power self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix self.k_decay = k_decay if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - (self.t_initial * i) gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] k = self.k_decay if i < self.cycle_limit: lrs = [ self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power for lr_max in lr_max_values ] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/__init__.py
from .cosine_lr import CosineLRScheduler from .multistep_lr import MultiStepLRScheduler from .plateau_lr import PlateauLRScheduler from .poly_lr import PolyLRScheduler from .step_lr import StepLRScheduler from .tanh_lr import TanhLRScheduler from .scheduler_factory import create_scheduler, create_scheduler_v2, scheduler_kwargs
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/plateau_lr.py
""" Plateau Scheduler Adapts PyTorch plateau scheduler and allows application of noise, warmup. Hacked together by / Copyright 2020 Ross Wightman """ import torch from .scheduler import Scheduler class PlateauLRScheduler(Scheduler): """Decay the LR by a factor every time the validation loss plateaus.""" def __init__( self, optimizer, decay_rate=0.1, patience_t=10, verbose=True, threshold=1e-4, cooldown_t=0, warmup_t=0, warmup_lr_init=0, lr_min=0, mode='max', noise_range_t=None, noise_type='normal', noise_pct=0.67, noise_std=1.0, noise_seed=None, initialize=True, ): super().__init__( optimizer, 'lr', noise_range_t=noise_range_t, noise_type=noise_type, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer, patience=patience_t, factor=decay_rate, verbose=verbose, threshold=threshold, cooldown=cooldown_t, mode=mode, min_lr=lr_min ) self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] self.restore_lr = None def state_dict(self): return { 'best': self.lr_scheduler.best, 'last_epoch': self.lr_scheduler.last_epoch, } def load_state_dict(self, state_dict): self.lr_scheduler.best = state_dict['best'] if 'last_epoch' in state_dict: self.lr_scheduler.last_epoch = state_dict['last_epoch'] # override the base class step fn completely def step(self, epoch, metric=None): if epoch <= self.warmup_t: lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps] super().update_groups(lrs) else: if self.restore_lr is not None: # restore actual LR from before our last noise perturbation before stepping base for i, param_group in enumerate(self.optimizer.param_groups): param_group['lr'] = self.restore_lr[i] self.restore_lr = None self.lr_scheduler.step(metric, epoch) # step the base scheduler if self._is_apply_noise(epoch): self._apply_noise(epoch) def step_update(self, num_updates: int, metric: float = None): return None def _apply_noise(self, epoch): noise = self._calculate_noise(epoch) # apply the noise on top of previous LR, cache the old value so we can restore for normal # stepping of base scheduler restore_lr = [] for i, param_group in enumerate(self.optimizer.param_groups): old_lr = float(param_group['lr']) restore_lr.append(old_lr) new_lr = old_lr + old_lr * noise param_group['lr'] = new_lr self.restore_lr = restore_lr def _get_lr(self, t: int) -> float: assert False, 'should not be called as step is overridden'
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/scheduler_factory.py
""" Scheduler Factory Hacked together by / Copyright 2021 Ross Wightman """ from typing import List, Union from torch.optim import Optimizer from .cosine_lr import CosineLRScheduler from .multistep_lr import MultiStepLRScheduler from .plateau_lr import PlateauLRScheduler from .poly_lr import PolyLRScheduler from .step_lr import StepLRScheduler from .tanh_lr import TanhLRScheduler def scheduler_kwargs(cfg): """ cfg/argparse to kwargs helper Convert scheduler args in argparse args or cfg (.dot) like object to keyword args. """ eval_metric = getattr(cfg, 'eval_metric', 'top1') plateau_mode = 'min' if 'loss' in eval_metric else 'max' kwargs = dict( sched=cfg.sched, num_epochs=getattr(cfg, 'epochs', 100), decay_epochs=getattr(cfg, 'decay_epochs', 30), decay_milestones=getattr(cfg, 'decay_milestones', [30, 60]), warmup_epochs=getattr(cfg, 'warmup_epochs', 5), cooldown_epochs=getattr(cfg, 'cooldown_epochs', 0), patience_epochs=getattr(cfg, 'patience_epochs', 10), decay_rate=getattr(cfg, 'decay_rate', 0.1), min_lr=getattr(cfg, 'min_lr', 0.), warmup_lr=getattr(cfg, 'warmup_lr', 1e-5), warmup_prefix=getattr(cfg, 'warmup_prefix', False), noise=getattr(cfg, 'lr_noise', None), noise_pct=getattr(cfg, 'lr_noise_pct', 0.67), noise_std=getattr(cfg, 'lr_noise_std', 1.), noise_seed=getattr(cfg, 'seed', 42), cycle_mul=getattr(cfg, 'lr_cycle_mul', 1.), cycle_decay=getattr(cfg, 'lr_cycle_decay', 0.1), cycle_limit=getattr(cfg, 'lr_cycle_limit', 1), k_decay=getattr(cfg, 'lr_k_decay', 1.0), plateau_mode=plateau_mode, step_on_epochs=not getattr(cfg, 'sched_on_updates', False), ) return kwargs def create_scheduler( args, optimizer: Optimizer, updates_per_epoch: int = 0, ): return create_scheduler_v2( optimizer=optimizer, **scheduler_kwargs(args), updates_per_epoch=updates_per_epoch, ) def create_scheduler_v2( optimizer: Optimizer, sched: str = 'cosine', num_epochs: int = 300, decay_epochs: int = 90, decay_milestones: List[int] = (90, 180, 270), cooldown_epochs: int = 0, patience_epochs: int = 10, decay_rate: float = 0.1, min_lr: float = 0, warmup_lr: float = 1e-5, warmup_epochs: int = 0, warmup_prefix: bool = False, noise: Union[float, List[float]] = None, noise_pct: float = 0.67, noise_std: float = 1., noise_seed: int = 42, cycle_mul: float = 1., cycle_decay: float = 0.1, cycle_limit: int = 1, k_decay: float = 1.0, plateau_mode: str = 'max', step_on_epochs: bool = True, updates_per_epoch: int = 0, ): t_initial = num_epochs warmup_t = warmup_epochs decay_t = decay_epochs cooldown_t = cooldown_epochs if not step_on_epochs: assert updates_per_epoch > 0, 'updates_per_epoch must be set to number of dataloader batches' t_initial = t_initial * updates_per_epoch warmup_t = warmup_t * updates_per_epoch decay_t = decay_t * updates_per_epoch decay_milestones = [d * updates_per_epoch for d in decay_milestones] cooldown_t = cooldown_t * updates_per_epoch # warmup args warmup_args = dict( warmup_lr_init=warmup_lr, warmup_t=warmup_t, warmup_prefix=warmup_prefix, ) # setup noise args for supporting schedulers if noise is not None: if isinstance(noise, (list, tuple)): noise_range = [n * t_initial for n in noise] if len(noise_range) == 1: noise_range = noise_range[0] else: noise_range = noise * t_initial else: noise_range = None noise_args = dict( noise_range_t=noise_range, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, ) # setup cycle args for supporting schedulers cycle_args = dict( cycle_mul=cycle_mul, cycle_decay=cycle_decay, cycle_limit=cycle_limit, ) lr_scheduler = None if sched == 'cosine': lr_scheduler = 
CosineLRScheduler( optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, **cycle_args, **warmup_args, **noise_args, k_decay=k_decay, ) elif sched == 'tanh': lr_scheduler = TanhLRScheduler( optimizer, t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, **cycle_args, **warmup_args, **noise_args, ) elif sched == 'step': lr_scheduler = StepLRScheduler( optimizer, decay_t=decay_t, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args, ) elif sched == 'multistep': lr_scheduler = MultiStepLRScheduler( optimizer, decay_t=decay_milestones, decay_rate=decay_rate, t_in_epochs=step_on_epochs, **warmup_args, **noise_args, ) elif sched == 'plateau': assert step_on_epochs, 'Plateau LR only supports step per epoch.' warmup_args.pop('warmup_prefix', False) lr_scheduler = PlateauLRScheduler( optimizer, decay_rate=decay_rate, patience_t=patience_epochs, cooldown_t=0, **warmup_args, lr_min=min_lr, mode=plateau_mode, **noise_args, ) elif sched == 'poly': lr_scheduler = PolyLRScheduler( optimizer, power=decay_rate, # overloading 'decay_rate' as polynomial power t_initial=t_initial, lr_min=min_lr, t_in_epochs=step_on_epochs, k_decay=k_decay, **cycle_args, **warmup_args, **noise_args, ) if hasattr(lr_scheduler, 'get_cycle_length'): # for cycle based schedulers (cosine, tanh, poly) recalculate total epochs w/ cycles & cooldown t_with_cycles_and_cooldown = lr_scheduler.get_cycle_length() + cooldown_t if step_on_epochs: num_epochs = t_with_cycles_and_cooldown else: num_epochs = t_with_cycles_and_cooldown // updates_per_epoch return lr_scheduler, num_epochs
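# Usage sketch (an illustrative assumption, not part of this module): build a per-update
# cosine schedule via the factory; with step_on_epochs=False, updates_per_epoch must equal
# the number of dataloader batches. The model and counts are stand-ins.
import torch
import torch.nn as nn

updates_per_epoch = 1000  # number of optimizer updates (dataloader batches) per epoch
model = nn.Linear(16, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler, num_epochs = create_scheduler_v2(
    optimizer,
    sched='cosine',
    num_epochs=100,
    warmup_epochs=5,
    warmup_lr=1e-6,
    min_lr=1e-5,
    step_on_epochs=False,
    updates_per_epoch=updates_per_epoch,
)

num_updates = 0
for epoch in range(num_epochs):
    for _ in range(updates_per_epoch):
        # ... forward / backward / optimizer.step() ...
        num_updates += 1
        scheduler.step_update(num_updates)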
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/cosine_lr.py
""" Cosine Scheduler Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. Hacked together by / Copyright 2021 Ross Wightman """ import logging import math import numpy as np import torch from .scheduler import Scheduler _logger = logging.getLogger(__name__) class CosineLRScheduler(Scheduler): """ Cosine decay with restarts. This is described in the paper https://arxiv.org/abs/1608.03983. Inspiration from https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 """ def __init__( self, optimizer: torch.optim.Optimizer, t_initial: int, lr_min: float = 0., cycle_mul: float = 1., cycle_decay: float = 1., cycle_limit: int = 1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, k_decay=1.0, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) assert t_initial > 0 assert lr_min >= 0 if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: _logger.warning( "Cosine annealing scheduler will have no effect on the learning " "rate since t_initial = t_mul = eta_mul = 1.") self.t_initial = t_initial self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix self.k_decay = k_decay if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - (self.t_initial * i) gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] k = self.k_decay if i < self.cycle_limit: lrs = [ self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k)) for lr_max in lr_max_values ] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/multistep_lr.py
""" MultiStep LR Scheduler Basic multi step LR schedule with warmup, noise. """ import torch import bisect from timm.scheduler.scheduler import Scheduler from typing import List class MultiStepLRScheduler(Scheduler): """ """ def __init__( self, optimizer: torch.optim.Optimizer, decay_t: List[int], decay_rate: float = 1., warmup_t=0, warmup_lr_init=0, warmup_prefix=True, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.decay_t = decay_t self.decay_rate = decay_rate self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def get_curr_decay_steps(self, t): # find where in the array t goes, # assumes self.decay_t is sorted return bisect.bisect_right(self.decay_t, t + 1) def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t lrs = [v * (self.decay_rate ** self.get_curr_decay_steps(t)) for v in self.base_values] return lrs
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/scheduler.py
import abc from abc import ABC from typing import Any, Dict, Optional import torch class Scheduler(ABC): """ Parameter Scheduler Base Class A scheduler base class that can be used to schedule any optimizer parameter groups. Unlike the builtin PyTorch schedulers, this is intended to be consistently called * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value * At the END of each optimizer update, after incrementing the update count, to calculate next update's value The schedulers built on this should try to remain as stateless as possible (for simplicity). This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' and -1 values for special behaviour. All epoch and update counts must be tracked in the training code and explicitly passed in to the schedulers on the corresponding step or step_update call. Based on ideas from: * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers """ def __init__( self, optimizer: torch.optim.Optimizer, param_group_field: str, t_in_epochs: bool = True, noise_range_t=None, noise_type='normal', noise_pct=0.67, noise_std=1.0, noise_seed=None, initialize: bool = True, ) -> None: self.optimizer = optimizer self.param_group_field = param_group_field self._initial_param_group_field = f"initial_{param_group_field}" if initialize: for i, group in enumerate(self.optimizer.param_groups): if param_group_field not in group: raise KeyError(f"{param_group_field} missing from param_groups[{i}]") group.setdefault(self._initial_param_group_field, group[param_group_field]) else: for i, group in enumerate(self.optimizer.param_groups): if self._initial_param_group_field not in group: raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] self.metric = None # any point to having this for all? 
self.t_in_epochs = t_in_epochs self.noise_range_t = noise_range_t self.noise_pct = noise_pct self.noise_type = noise_type self.noise_std = noise_std self.noise_seed = noise_seed if noise_seed is not None else 42 self.update_groups(self.base_values) def state_dict(self) -> Dict[str, Any]: return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self.__dict__.update(state_dict) @abc.abstractmethod def _get_lr(self, t: int) -> float: pass def _get_values(self, t: int, on_epoch: bool = True) -> Optional[float]: proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs) if not proceed: return None return self._get_lr(t) def step(self, epoch: int, metric: float = None) -> None: self.metric = metric values = self._get_values(epoch, on_epoch=True) if values is not None: values = self._add_noise(values, epoch) self.update_groups(values) def step_update(self, num_updates: int, metric: float = None): self.metric = metric values = self._get_values(num_updates, on_epoch=False) if values is not None: values = self._add_noise(values, num_updates) self.update_groups(values) def update_groups(self, values): if not isinstance(values, (list, tuple)): values = [values] * len(self.optimizer.param_groups) for param_group, value in zip(self.optimizer.param_groups, values): if 'lr_scale' in param_group: param_group[self.param_group_field] = value * param_group['lr_scale'] else: param_group[self.param_group_field] = value def _add_noise(self, lrs, t): if self._is_apply_noise(t): noise = self._calculate_noise(t) lrs = [v + v * noise for v in lrs] return lrs def _is_apply_noise(self, t) -> bool: """Return True if scheduler in noise range.""" apply_noise = False if self.noise_range_t is not None: if isinstance(self.noise_range_t, (list, tuple)): apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] else: apply_noise = t >= self.noise_range_t return apply_noise def _calculate_noise(self, t) -> float: g = torch.Generator() g.manual_seed(self.noise_seed + t) if self.noise_type == 'normal': while True: # resample if noise out of percent limit, brute force but shouldn't spin much noise = torch.randn(1, generator=g).item() if abs(noise) < self.noise_pct: return noise else: noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct return noise
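# Usage sketch (an illustrative assumption, not part of this module): the calling
# convention described in the class docstring, shown with a trivial concrete subclass.
# step_update() is called after every optimizer update and step() at the end of every
# epoch; with t_in_epochs=True only step() changes the LR.
import torch
import torch.nn as nn

class _ConstantLR(Scheduler):
    def _get_lr(self, t):
        return list(self.base_values)  # keep the initial LR for every t

optimizer = torch.optim.SGD([nn.Parameter(torch.zeros(1))], lr=0.1)
scheduler = _ConstantLR(optimizer, param_group_field='lr')

num_updates = 0
for epoch in range(3):
    for _ in range(10):                       # one (hypothetical) pass over a dataloader
        num_updates += 1
        scheduler.step_update(num_updates)    # no-op here since t_in_epochs=True
    scheduler.step(epoch + 1)                 # sets the LR used for the next epoch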
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/tanh_lr.py
""" TanH Scheduler TanH schedule with warmup, cycle/restarts, noise. Hacked together by / Copyright 2021 Ross Wightman """ import logging import math import numpy as np import torch from .scheduler import Scheduler _logger = logging.getLogger(__name__) class TanhLRScheduler(Scheduler): """ Hyberbolic-Tangent decay with restarts. This is described in the paper https://arxiv.org/abs/1806.01593 """ def __init__( self, optimizer: torch.optim.Optimizer, t_initial: int, lb: float = -7., ub: float = 3., lr_min: float = 0., cycle_mul: float = 1., cycle_decay: float = 1., cycle_limit: int = 1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) assert t_initial > 0 assert lr_min >= 0 assert lb < ub assert cycle_limit >= 0 assert warmup_t >= 0 assert warmup_lr_init >= 0 self.lb = lb self.ub = ub self.t_initial = t_initial self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t) self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - (self.t_initial * i) if i < self.cycle_limit: gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] tr = t_curr / t_i lrs = [ self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr)) for lr_max in lr_max_values ] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
0
hf_public_repos/pytorch-image-models/timm
hf_public_repos/pytorch-image-models/timm/scheduler/step_lr.py
""" Step Scheduler Basic step LR schedule with warmup, noise. Hacked together by / Copyright 2020 Ross Wightman """ import math import torch from .scheduler import Scheduler class StepLRScheduler(Scheduler): """ """ def __init__( self, optimizer: torch.optim.Optimizer, decay_t: float, decay_rate: float = 1., warmup_t=0, warmup_lr_init=0, warmup_prefix=True, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize, ) self.decay_t = decay_t self.decay_rate = decay_rate self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] return lrs
0