meg (HF staff) committed on
Commit e411e4d · verified · 1 Parent(s): ad283e4

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set of files.
Files changed (50)
  1. pytorch-image-models/timm/layers/__pycache__/cbam.cpython-39.pyc +0 -0
  2. pytorch-image-models/timm/layers/__pycache__/classifier.cpython-39.pyc +0 -0
  3. pytorch-image-models/timm/layers/__pycache__/config.cpython-39.pyc +0 -0
  4. pytorch-image-models/timm/layers/__pycache__/conv2d_same.cpython-39.pyc +0 -0
  5. pytorch-image-models/timm/layers/__pycache__/create_act.cpython-39.pyc +0 -0
  6. pytorch-image-models/timm/layers/__pycache__/create_conv2d.cpython-39.pyc +0 -0
  7. pytorch-image-models/timm/layers/__pycache__/create_norm.cpython-39.pyc +0 -0
  8. pytorch-image-models/timm/layers/__pycache__/create_norm_act.cpython-39.pyc +0 -0
  9. pytorch-image-models/timm/layers/__pycache__/evo_norm.cpython-39.pyc +0 -0
  10. pytorch-image-models/timm/layers/__pycache__/fast_norm.cpython-39.pyc +0 -0
  11. pytorch-image-models/timm/layers/__pycache__/filter_response_norm.cpython-39.pyc +0 -0
  12. pytorch-image-models/timm/layers/__pycache__/global_context.cpython-39.pyc +0 -0
  13. pytorch-image-models/timm/layers/__pycache__/interpolate.cpython-39.pyc +0 -0
  14. pytorch-image-models/timm/layers/__pycache__/mixed_conv2d.cpython-39.pyc +0 -0
  15. pytorch-image-models/timm/layers/__pycache__/norm.cpython-39.pyc +0 -0
  16. pytorch-image-models/timm/layers/__pycache__/norm_act.cpython-39.pyc +0 -0
  17. pytorch-image-models/timm/layers/__pycache__/padding.cpython-39.pyc +0 -0
  18. pytorch-image-models/timm/layers/__pycache__/patch_dropout.cpython-39.pyc +0 -0
  19. pytorch-image-models/timm/layers/__pycache__/space_to_depth.cpython-39.pyc +0 -0
  20. pytorch-image-models/timm/layers/__pycache__/split_batchnorm.cpython-39.pyc +0 -0
  21. pytorch-image-models/timm/loss/asymmetric_loss.py +97 -0
  22. pytorch-image-models/timm/models/__pycache__/_features_fx.cpython-39.pyc +0 -0
  23. pytorch-image-models/timm/models/__pycache__/_hub.cpython-39.pyc +0 -0
  24. pytorch-image-models/timm/models/__pycache__/beit.cpython-39.pyc +0 -0
  25. pytorch-image-models/timm/models/__pycache__/mambaout.cpython-39.pyc +0 -0
  26. pytorch-image-models/timm/models/__pycache__/metaformer.cpython-39.pyc +0 -0
  27. pytorch-image-models/timm/models/__pycache__/mlp_mixer.cpython-39.pyc +0 -0
  28. pytorch-image-models/timm/models/_efficientnet_builder.py +576 -0
  29. pytorch-image-models/timm/models/_helpers.py +166 -0
  30. pytorch-image-models/timm/models/_manipulate.py +278 -0
  31. pytorch-image-models/timm/models/_pretrained.py +94 -0
  32. pytorch-image-models/timm/models/_prune.py +116 -0
  33. pytorch-image-models/timm/models/_pruned/efficientnet_b1_pruned.txt +1 -0
  34. pytorch-image-models/timm/models/focalnet.py +652 -0
  35. pytorch-image-models/timm/models/fx_features.py +4 -0
  36. pytorch-image-models/timm/models/gcvit.py +592 -0
  37. pytorch-image-models/timm/models/ghostnet.py +433 -0
  38. pytorch-image-models/timm/models/hardcorenas.py +156 -0
  39. pytorch-image-models/timm/models/helpers.py +7 -0
  40. pytorch-image-models/timm/models/hgnet.py +738 -0
  41. pytorch-image-models/timm/models/hiera.py +996 -0
  42. pytorch-image-models/timm/models/hieradet_sam2.py +635 -0
  43. pytorch-image-models/timm/models/hub.py +4 -0
  44. pytorch-image-models/timm/models/inception_next.py +445 -0
  45. pytorch-image-models/timm/models/inception_resnet_v2.py +341 -0
  46. pytorch-image-models/timm/models/inception_v3.py +458 -0
  47. pytorch-image-models/timm/models/inception_v4.py +325 -0
  48. pytorch-image-models/timm/models/layers/__init__.py +48 -0
  49. pytorch-image-models/timm/models/levit.py +997 -0
  50. pytorch-image-models/timm/models/mambaout.py +642 -0
pytorch-image-models/timm/layers/__pycache__/cbam.cpython-39.pyc ADDED
Binary file (5.07 kB).
 
pytorch-image-models/timm/layers/__pycache__/classifier.cpython-39.pyc ADDED
Binary file (7.84 kB).
 
pytorch-image-models/timm/layers/__pycache__/config.cpython-39.pyc ADDED
Binary file (4.19 kB).
 
pytorch-image-models/timm/layers/__pycache__/conv2d_same.cpython-39.pyc ADDED
Binary file (3.02 kB).
 
pytorch-image-models/timm/layers/__pycache__/create_act.cpython-39.pyc ADDED
Binary file (3.26 kB).
 
pytorch-image-models/timm/layers/__pycache__/create_conv2d.cpython-39.pyc ADDED
Binary file (1.08 kB).
 
pytorch-image-models/timm/layers/__pycache__/create_norm.cpython-39.pyc ADDED
Binary file (1.58 kB).
 
pytorch-image-models/timm/layers/__pycache__/create_norm_act.cpython-39.pyc ADDED
Binary file (2.67 kB).
 
pytorch-image-models/timm/layers/__pycache__/evo_norm.cpython-39.pyc ADDED
Binary file (12.3 kB).
 
pytorch-image-models/timm/layers/__pycache__/fast_norm.cpython-39.pyc ADDED
Binary file (3.49 kB).
 
pytorch-image-models/timm/layers/__pycache__/filter_response_norm.cpython-39.pyc ADDED
Binary file (3.09 kB).
 
pytorch-image-models/timm/layers/__pycache__/global_context.cpython-39.pyc ADDED
Binary file (2.38 kB).
 
pytorch-image-models/timm/layers/__pycache__/interpolate.cpython-39.pyc ADDED
Binary file (2.42 kB).
 
pytorch-image-models/timm/layers/__pycache__/mixed_conv2d.cpython-39.pyc ADDED
Binary file (2.22 kB).
 
pytorch-image-models/timm/layers/__pycache__/norm.cpython-39.pyc ADDED
Binary file (7.07 kB).
 
pytorch-image-models/timm/layers/__pycache__/norm_act.cpython-39.pyc ADDED
Binary file (11.6 kB).
 
pytorch-image-models/timm/layers/__pycache__/padding.cpython-39.pyc ADDED
Binary file (3 kB).
 
pytorch-image-models/timm/layers/__pycache__/patch_dropout.cpython-39.pyc ADDED
Binary file (1.73 kB).
 
pytorch-image-models/timm/layers/__pycache__/space_to_depth.cpython-39.pyc ADDED
Binary file (1.53 kB).
 
pytorch-image-models/timm/layers/__pycache__/split_batchnorm.cpython-39.pyc ADDED
Binary file (3.3 kB).
 
pytorch-image-models/timm/loss/asymmetric_loss.py ADDED
@@ -0,0 +1,97 @@
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+
5
+ class AsymmetricLossMultiLabel(nn.Module):
6
+ def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, disable_torch_grad_focal_loss=False):
7
+ super(AsymmetricLossMultiLabel, self).__init__()
8
+
9
+ self.gamma_neg = gamma_neg
10
+ self.gamma_pos = gamma_pos
11
+ self.clip = clip
12
+ self.disable_torch_grad_focal_loss = disable_torch_grad_focal_loss
13
+ self.eps = eps
14
+
15
+ def forward(self, x, y):
16
+ """"
17
+ Parameters
18
+ ----------
19
+ x: input logits
20
+ y: targets (multi-label binarized vector)
21
+ """
22
+
23
+ # Calculating Probabilities
24
+ x_sigmoid = torch.sigmoid(x)
25
+ xs_pos = x_sigmoid
26
+ xs_neg = 1 - x_sigmoid
27
+
28
+ # Asymmetric Clipping
29
+ if self.clip is not None and self.clip > 0:
30
+ xs_neg = (xs_neg + self.clip).clamp(max=1)
31
+
32
+ # Basic CE calculation
33
+ los_pos = y * torch.log(xs_pos.clamp(min=self.eps))
34
+ los_neg = (1 - y) * torch.log(xs_neg.clamp(min=self.eps))
35
+ loss = los_pos + los_neg
36
+
37
+ # Asymmetric Focusing
38
+ if self.gamma_neg > 0 or self.gamma_pos > 0:
39
+ if self.disable_torch_grad_focal_loss:
40
+ torch.set_grad_enabled(False)
41
+ pt0 = xs_pos * y
42
+ pt1 = xs_neg * (1 - y) # pt = p if t > 0 else 1-p
43
+ pt = pt0 + pt1
44
+ one_sided_gamma = self.gamma_pos * y + self.gamma_neg * (1 - y)
45
+ one_sided_w = torch.pow(1 - pt, one_sided_gamma)
46
+ if self.disable_torch_grad_focal_loss:
47
+ torch.set_grad_enabled(True)
48
+ loss *= one_sided_w
49
+
50
+ return -loss.sum()
51
+
52
+
53
+ class AsymmetricLossSingleLabel(nn.Module):
54
+ def __init__(self, gamma_pos=1, gamma_neg=4, eps: float = 0.1, reduction='mean'):
55
+ super(AsymmetricLossSingleLabel, self).__init__()
56
+
57
+ self.eps = eps
58
+ self.logsoftmax = nn.LogSoftmax(dim=-1)
59
+ self.targets_classes = [] # prevent repeated GPU memory allocation
60
+ self.gamma_pos = gamma_pos
61
+ self.gamma_neg = gamma_neg
62
+ self.reduction = reduction
63
+
64
+ def forward(self, inputs, target, reduction=None):
65
+ """"
66
+ Parameters
67
+ ----------
68
+ inputs: input logits
69
+ target: targets (1-hot vector)
70
+ """
71
+
72
+ num_classes = inputs.size()[-1]
73
+ log_preds = self.logsoftmax(inputs)
74
+ self.targets_classes = torch.zeros_like(inputs).scatter_(1, target.long().unsqueeze(1), 1)
75
+
76
+ # ASL weights
77
+ targets = self.targets_classes
78
+ anti_targets = 1 - targets
79
+ xs_pos = torch.exp(log_preds)
80
+ xs_neg = 1 - xs_pos
81
+ xs_pos = xs_pos * targets
82
+ xs_neg = xs_neg * anti_targets
83
+ asymmetric_w = torch.pow(1 - xs_pos - xs_neg,
84
+ self.gamma_pos * targets + self.gamma_neg * anti_targets)
85
+ log_preds = log_preds * asymmetric_w
86
+
87
+ if self.eps > 0: # label smoothing
88
+ self.targets_classes = self.targets_classes.mul(1 - self.eps).add(self.eps / num_classes)
89
+
90
+ # loss calculation
91
+ loss = - self.targets_classes.mul(log_preds)
92
+
93
+ loss = loss.sum(dim=-1)
94
+ if self.reduction == 'mean':
95
+ loss = loss.mean()
96
+
97
+ return loss
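A minimal usage sketch for the multi-label loss added above (not part of the diff), assuming the timm package in this folder is importable; the batch size, label count, and random tensors are illustrative only.

import torch
from timm.loss.asymmetric_loss import AsymmetricLossMultiLabel

criterion = AsymmetricLossMultiLabel(gamma_neg=4, gamma_pos=1, clip=0.05)
logits = torch.randn(8, 20, requires_grad=True)   # raw scores: batch of 8 samples, 20 labels
targets = torch.randint(0, 2, (8, 20)).float()    # multi-hot target vectors
loss = criterion(logits, targets)                 # scalar, summed over batch and labels
loss.backward()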
pytorch-image-models/timm/models/__pycache__/_features_fx.cpython-39.pyc ADDED
Binary file (5.81 kB).
 
pytorch-image-models/timm/models/__pycache__/_hub.cpython-39.pyc ADDED
Binary file (11.2 kB).
 
pytorch-image-models/timm/models/__pycache__/beit.cpython-39.pyc ADDED
Binary file (20.3 kB).
 
pytorch-image-models/timm/models/__pycache__/mambaout.cpython-39.pyc ADDED
Binary file (16.9 kB).
 
pytorch-image-models/timm/models/__pycache__/metaformer.cpython-39.pyc ADDED
Binary file (26.6 kB).
 
pytorch-image-models/timm/models/__pycache__/mlp_mixer.cpython-39.pyc ADDED
Binary file (23.4 kB).
 
pytorch-image-models/timm/models/_efficientnet_builder.py ADDED
@@ -0,0 +1,576 @@
1
+ """ EfficientNet, MobileNetV3, etc Builder
2
+
3
+ Assembles EfficientNet and related network feature blocks from string definitions.
4
+ Handles stride, dilation calculations, and selects feature extraction points.
5
+
6
+ Hacked together by / Copyright 2019, Ross Wightman
7
+ """
8
+ from typing import Callable, Optional
9
+
10
+ import logging
11
+ import math
12
+ import re
13
+ from copy import deepcopy
14
+ from functools import partial
15
+ from typing import Any, Dict, List
16
+
17
+ import torch.nn as nn
18
+
19
+ from timm.layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible, LayerType
20
+ from ._efficientnet_blocks import *
21
+ from ._manipulate import named_modules
22
+
23
+ __all__ = ["EfficientNetBuilder", "BlockArgs", "decode_arch_def", "efficientnet_init_weights",
24
+ 'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT']
25
+
26
+ _logger = logging.getLogger(__name__)
27
+
28
+
29
+ _DEBUG_BUILDER = False
30
+
31
+ # Defaults used for Google/Tensorflow training of mobile networks w/ RMSprop as per
32
+ # papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay)
33
+ # NOTE: momentum varies btw .99 and .9997 depending on source
34
+ # .99 in official TF TPU impl
35
+ # .9997 (/w .999 in search space) for paper
36
+ BN_MOMENTUM_TF_DEFAULT = 1 - 0.99
37
+ BN_EPS_TF_DEFAULT = 1e-3
38
+ _BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT)
39
+
40
+ BlockArgs = List[List[Dict[str, Any]]]
41
+
42
+
43
+ def get_bn_args_tf():
44
+ return _BN_ARGS_TF.copy()
45
+
46
+
47
+ def resolve_bn_args(kwargs):
48
+ bn_args = {}
49
+ bn_momentum = kwargs.pop('bn_momentum', None)
50
+ if bn_momentum is not None:
51
+ bn_args['momentum'] = bn_momentum
52
+ bn_eps = kwargs.pop('bn_eps', None)
53
+ if bn_eps is not None:
54
+ bn_args['eps'] = bn_eps
55
+ return bn_args
56
+
57
+
58
+ def resolve_act_layer(kwargs, default='relu'):
59
+ return get_act_layer(kwargs.pop('act_layer', default))
60
+
61
+
62
+ def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9):
63
+ """Round number of filters based on depth multiplier."""
64
+ if not multiplier:
65
+ return channels
66
+ return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit)
67
+
68
+
69
+ def _log_info_if(msg, condition):
70
+ if condition:
71
+ _logger.info(msg)
72
+
73
+
74
+ def _parse_ksize(ss):
75
+ if ss.isdigit():
76
+ return int(ss)
77
+ else:
78
+ return [int(k) for k in ss.split('.')]
79
+
80
+
81
+ def _decode_block_str(block_str):
82
+ """ Decode block definition string
83
+
84
+ Gets a list of block arg (dicts) through a string notation of arguments.
85
+ E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip
86
+
87
+ All args can exist in any order with the exception of the leading string which
88
+ is assumed to indicate the block type.
89
+
90
+ leading string - block type (
91
+ ir = InvertedResidual, ds = DepthwiseSep, dsa = DepthwiseSep with pw act, cn = ConvBnAct)
92
+ r - number of repeat blocks,
93
+ k - kernel size,
94
+ s - strides (1-9),
95
+ e - expansion ratio,
96
+ c - output channels,
97
+ se - squeeze/excitation ratio
98
+ n - activation fn ('re', 'r6', 'hs', or 'sw')
99
+ Args:
100
+ block_str: a string representation of block arguments.
101
+ Returns:
102
+ A list of block args (dicts)
103
+ Raises:
104
+ ValueError: if the string def is not properly specified (TODO)
105
+ """
106
+ assert isinstance(block_str, str)
107
+ ops = block_str.split('_')
108
+ block_type = ops[0] # take the block type off the front
109
+ ops = ops[1:]
110
+ options = {}
111
+ skip = None
112
+ for op in ops:
113
+ # string options being checked on individual basis, combine if they grow
114
+ if op == 'noskip':
115
+ skip = False # force no skip connection
116
+ elif op == 'skip':
117
+ skip = True # force a skip connection
118
+ elif op.startswith('n'):
119
+ # activation fn
120
+ key = op[0]
121
+ v = op[1:]
122
+ if v == 're':
123
+ value = get_act_layer('relu')
124
+ elif v == 'r6':
125
+ value = get_act_layer('relu6')
126
+ elif v == 'hs':
127
+ value = get_act_layer('hard_swish')
128
+ elif v == 'sw':
129
+ value = get_act_layer('swish') # aka SiLU
130
+ elif v == 'mi':
131
+ value = get_act_layer('mish')
132
+ else:
133
+ continue
134
+ options[key] = value
135
+ else:
136
+ # all numeric options
137
+ splits = re.split(r'(\d.*)', op)
138
+ if len(splits) >= 2:
139
+ key, value = splits[:2]
140
+ options[key] = value
141
+
142
+ # if act_layer is None, the model default (passed to model init) will be used
143
+ act_layer = options['n'] if 'n' in options else None
144
+ start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1
145
+ end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1
146
+ force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def
147
+ num_repeat = int(options['r'])
148
+
149
+ # each type of block has different valid arguments, fill accordingly
150
+ block_args = dict(
151
+ block_type=block_type,
152
+ out_chs=int(options['c']),
153
+ stride=int(options['s']),
154
+ act_layer=act_layer,
155
+ )
156
+ if block_type == 'ir':
157
+ block_args.update(dict(
158
+ dw_kernel_size=_parse_ksize(options['k']),
159
+ exp_kernel_size=start_kernel_size,
160
+ pw_kernel_size=end_kernel_size,
161
+ exp_ratio=float(options['e']),
162
+ se_ratio=float(options.get('se', 0.)),
163
+ noskip=skip is False,
164
+ s2d=int(options.get('d', 0)) > 0,
165
+ ))
166
+ if 'cc' in options:
167
+ block_args['num_experts'] = int(options['cc'])
168
+ elif block_type == 'ds' or block_type == 'dsa':
169
+ block_args.update(dict(
170
+ dw_kernel_size=_parse_ksize(options['k']),
171
+ pw_kernel_size=end_kernel_size,
172
+ se_ratio=float(options.get('se', 0.)),
173
+ pw_act=block_type == 'dsa',
174
+ noskip=block_type == 'dsa' or skip is False,
175
+ s2d=int(options.get('d', 0)) > 0,
176
+ ))
177
+ elif block_type == 'er':
178
+ block_args.update(dict(
179
+ exp_kernel_size=_parse_ksize(options['k']),
180
+ pw_kernel_size=end_kernel_size,
181
+ exp_ratio=float(options['e']),
182
+ force_in_chs=force_in_chs,
183
+ se_ratio=float(options.get('se', 0.)),
184
+ noskip=skip is False,
185
+ ))
186
+ elif block_type == 'cn':
187
+ block_args.update(dict(
188
+ kernel_size=int(options['k']),
189
+ skip=skip is True,
190
+ ))
191
+ elif block_type == 'uir':
192
+ # override exp / proj kernels for start/end in uir block
193
+ start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 0
194
+ end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 0
195
+ block_args.update(dict(
196
+ dw_kernel_size_start=start_kernel_size, # overload exp ks arg for dw start
197
+ dw_kernel_size_mid=_parse_ksize(options['k']),
198
+ dw_kernel_size_end=end_kernel_size, # overload pw ks arg for dw end
199
+ exp_ratio=float(options['e']),
200
+ se_ratio=float(options.get('se', 0.)),
201
+ noskip=skip is False,
202
+ ))
203
+ elif block_type == 'mha':
204
+ kv_dim = int(options['d'])
205
+ block_args.update(dict(
206
+ dw_kernel_size=_parse_ksize(options['k']),
207
+ num_heads=int(options['h']),
208
+ key_dim=kv_dim,
209
+ value_dim=kv_dim,
210
+ kv_stride=int(options.get('v', 1)),
211
+ noskip=skip is False,
212
+ ))
213
+ elif block_type == 'mqa':
214
+ kv_dim = int(options['d'])
215
+ block_args.update(dict(
216
+ dw_kernel_size=_parse_ksize(options['k']),
217
+ num_heads=int(options['h']),
218
+ key_dim=kv_dim,
219
+ value_dim=kv_dim,
220
+ kv_stride=int(options.get('v', 1)),
221
+ noskip=skip is False,
222
+ ))
223
+ else:
224
+ assert False, 'Unknown block type (%s)' % block_type
225
+
226
+ if 'gs' in options:
227
+ block_args['group_size'] = int(options['gs'])
228
+
229
+ return block_args, num_repeat
230
+
231
+
232
+ def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'):
233
+ """ Per-stage depth scaling
234
+ Scales the block repeats in each stage. This depth scaling impl maintains
235
+ compatibility with the EfficientNet scaling method, while allowing sensible
236
+ scaling for other models that may have multiple block arg definitions in each stage.
237
+ """
238
+
239
+ # We scale the total repeat count for each stage, there may be multiple
240
+ # block arg defs per stage so we need to sum.
241
+ num_repeat = sum(repeats)
242
+ if depth_trunc == 'round':
243
+ # Truncating to int by rounding allows stages with few repeats to remain
244
+ # proportionally smaller for longer. This is a good choice when stage definitions
245
+ # include single repeat stages that we'd prefer to keep that way as long as possible
246
+ num_repeat_scaled = max(1, round(num_repeat * depth_multiplier))
247
+ else:
248
+ # The default for EfficientNet truncates repeats to int via 'ceil'.
249
+ # Any multiplier > 1.0 will result in an increased depth for every stage.
250
+ num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier))
251
+
252
+ # Proportionally distribute repeat count scaling to each block definition in the stage.
253
+ # Allocation is done in reverse as it results in the first block being less likely to be scaled.
254
+ # The first block makes less sense to repeat in most of the arch definitions.
255
+ repeats_scaled = []
256
+ for r in repeats[::-1]:
257
+ rs = max(1, round((r / num_repeat * num_repeat_scaled)))
258
+ repeats_scaled.append(rs)
259
+ num_repeat -= r
260
+ num_repeat_scaled -= rs
261
+ repeats_scaled = repeats_scaled[::-1]
262
+
263
+ # Apply the calculated scaling to each block arg in the stage
264
+ sa_scaled = []
265
+ for ba, rep in zip(stack_args, repeats_scaled):
266
+ sa_scaled.extend([deepcopy(ba) for _ in range(rep)])
267
+ return sa_scaled
268
+
269
+
270
+ def decode_arch_def(
271
+ arch_def,
272
+ depth_multiplier=1.0,
273
+ depth_trunc='ceil',
274
+ experts_multiplier=1,
275
+ fix_first_last=False,
276
+ group_size=None,
277
+ ):
278
+ """ Decode block architecture definition strings -> block kwargs
279
+
280
+ Args:
281
+ arch_def: architecture definition strings, list of list of strings
282
+ depth_multiplier: network depth multiplier
283
+ depth_trunc: network depth truncation mode when applying multiplier
284
+ experts_multiplier: CondConv experts multiplier
285
+ fix_first_last: fix first and last block depths when multiplier is applied
286
+ group_size: group size override for all blocks that weren't explicitly set in arch string
287
+
288
+ Returns:
289
+ list of list of block kwargs
290
+ """
291
+ arch_args = []
292
+ if isinstance(depth_multiplier, tuple):
293
+ assert len(depth_multiplier) == len(arch_def)
294
+ else:
295
+ depth_multiplier = (depth_multiplier,) * len(arch_def)
296
+ for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)):
297
+ assert isinstance(block_strings, list)
298
+ stack_args = []
299
+ repeats = []
300
+ for block_str in block_strings:
301
+ assert isinstance(block_str, str)
302
+ ba, rep = _decode_block_str(block_str)
303
+ if ba.get('num_experts', 0) > 0 and experts_multiplier > 1:
304
+ ba['num_experts'] *= experts_multiplier
305
+ if group_size is not None:
306
+ ba.setdefault('group_size', group_size)
307
+ stack_args.append(ba)
308
+ repeats.append(rep)
309
+ if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1):
310
+ arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc))
311
+ else:
312
+ arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc))
313
+ return arch_args
314
+
315
+
316
+ class EfficientNetBuilder:
317
+ """ Build Trunk Blocks
318
+
319
+ This ended up being somewhat of a cross between
320
+ https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py
321
+ and
322
+ https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py
323
+
324
+ """
325
+ def __init__(
326
+ self,
327
+ output_stride: int = 32,
328
+ pad_type: str = '',
329
+ round_chs_fn: Callable = round_channels,
330
+ se_from_exp: bool = False,
331
+ act_layer: Optional[LayerType] = None,
332
+ norm_layer: Optional[LayerType] = None,
333
+ aa_layer: Optional[LayerType] = None,
334
+ se_layer: Optional[LayerType] = None,
335
+ drop_path_rate: float = 0.,
336
+ layer_scale_init_value: Optional[float] = None,
337
+ feature_location: str = '',
338
+ ):
339
+ self.output_stride = output_stride
340
+ self.pad_type = pad_type
341
+ self.round_chs_fn = round_chs_fn
342
+ self.se_from_exp = se_from_exp # calculate se channel reduction from expanded (mid) chs
343
+ self.act_layer = act_layer
344
+ self.norm_layer = norm_layer
345
+ self.aa_layer = aa_layer
346
+ self.se_layer = get_attn(se_layer)
347
+ try:
348
+ self.se_layer(8, rd_ratio=1.0) # test if attn layer accepts rd_ratio arg
349
+ self.se_has_ratio = True
350
+ except TypeError:
351
+ self.se_has_ratio = False
352
+ self.drop_path_rate = drop_path_rate
353
+ self.layer_scale_init_value = layer_scale_init_value
354
+ if feature_location == 'depthwise':
355
+ # old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense
356
+ _logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'")
357
+ feature_location = 'expansion'
358
+ self.feature_location = feature_location
359
+ assert feature_location in ('bottleneck', 'expansion', '')
360
+ self.verbose = _DEBUG_BUILDER
361
+
362
+ # state updated during build, consumed by model
363
+ self.in_chs = None
364
+ self.features = []
365
+
366
+ def _make_block(self, ba, block_idx, block_count):
367
+ drop_path_rate = self.drop_path_rate * block_idx / block_count
368
+ bt = ba.pop('block_type')
369
+ ba['in_chs'] = self.in_chs
370
+ ba['out_chs'] = self.round_chs_fn(ba['out_chs'])
371
+ s2d = ba.get('s2d', 0)
372
+ if s2d > 0:
373
+ # adjust while space2depth active
374
+ ba['out_chs'] *= 4
375
+ if 'force_in_chs' in ba and ba['force_in_chs']:
376
+ # NOTE this is a hack to work around mismatch in TF EdgeEffNet impl
377
+ ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs'])
378
+ ba['pad_type'] = self.pad_type
379
+ # block act fn overrides the model default
380
+ ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
381
+ assert ba['act_layer'] is not None
382
+ ba['norm_layer'] = self.norm_layer
383
+ ba['drop_path_rate'] = drop_path_rate
384
+
385
+ if self.aa_layer is not None:
386
+ ba['aa_layer'] = self.aa_layer
387
+
388
+ se_ratio = ba.pop('se_ratio', None)
389
+ if se_ratio and self.se_layer is not None:
390
+ if not self.se_from_exp:
391
+ # adjust se_ratio by expansion ratio if calculating se channels from block input
392
+ se_ratio /= ba.get('exp_ratio', 1.0)
393
+ if s2d == 1:
394
+ # adjust for start of space2depth
395
+ se_ratio /= 4
396
+ if self.se_has_ratio:
397
+ ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio)
398
+ else:
399
+ ba['se_layer'] = self.se_layer
400
+
401
+ if bt == 'ir':
402
+ _log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
403
+ block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba)
404
+ elif bt == 'ds' or bt == 'dsa':
405
+ _log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
406
+ block = DepthwiseSeparableConv(**ba)
407
+ elif bt == 'er':
408
+ _log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
409
+ block = EdgeResidual(**ba)
410
+ elif bt == 'cn':
411
+ _log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
412
+ block = ConvBnAct(**ba)
413
+ elif bt == 'uir':
414
+ _log_info_if(' UniversalInvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
415
+ block = UniversalInvertedResidual(**ba, layer_scale_init_value=self.layer_scale_init_value)
416
+ elif bt == 'mqa':
417
+ _log_info_if(' MobileMultiQueryAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
418
+ block = MobileAttention(**ba, use_multi_query=True, layer_scale_init_value=self.layer_scale_init_value)
419
+ elif bt == 'mha':
420
+ _log_info_if(' MobileMultiHeadAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose)
421
+ block = MobileAttention(**ba, layer_scale_init_value=self.layer_scale_init_value)
422
+ else:
423
+ assert False, 'Unknown block type (%s) while building model.' % bt
424
+
425
+ self.in_chs = ba['out_chs'] # update in_chs for arg of next block
426
+ return block
427
+
428
+ def __call__(self, in_chs, model_block_args):
429
+ """ Build the blocks
430
+ Args:
431
+ in_chs: Number of input-channels passed to first block
432
+ model_block_args: A list of lists, outer list defines stages, inner
433
+ list contains strings defining block configuration(s)
434
+ Return:
435
+ List of block stacks (each stack wrapped in nn.Sequential)
436
+ """
437
+ _log_info_if('Building model trunk with %d stages...' % len(model_block_args), self.verbose)
438
+ self.in_chs = in_chs
439
+ total_block_count = sum([len(x) for x in model_block_args])
440
+ total_block_idx = 0
441
+ current_stride = 2
442
+ current_dilation = 1
443
+ stages = []
444
+ if model_block_args[0][0]['stride'] > 1:
445
+ # if the first block starts with a stride, we need to extract first level feat from stem
446
+ feature_info = dict(module='bn1', num_chs=in_chs, stage=0, reduction=current_stride)
447
+ self.features.append(feature_info)
448
+
449
+ # outer list of block_args defines the stacks
450
+ space2depth = 0
451
+ for stack_idx, stack_args in enumerate(model_block_args):
452
+ last_stack = stack_idx + 1 == len(model_block_args)
453
+ _log_info_if('Stack: {}'.format(stack_idx), self.verbose)
454
+ assert isinstance(stack_args, list)
455
+
456
+ blocks = []
457
+ # each stack (stage of blocks) contains a list of block arguments
458
+ for block_idx, block_args in enumerate(stack_args):
459
+ last_block = block_idx + 1 == len(stack_args)
460
+ _log_info_if(' Block: {}'.format(block_idx), self.verbose)
461
+
462
+ assert block_args['stride'] in (1, 2)
463
+ if block_idx >= 1: # only the first block in any stack can have a stride > 1
464
+ block_args['stride'] = 1
465
+
466
+ if not space2depth and block_args.pop('s2d', False):
467
+ assert block_args['stride'] == 1
468
+ space2depth = 1
469
+
470
+ if space2depth > 0:
471
+ # FIXME s2d is a WIP
472
+ if space2depth == 2 and block_args['stride'] == 2:
473
+ block_args['stride'] = 1
474
+ # to end s2d region, need to correct expansion and se ratio relative to input
475
+ block_args['exp_ratio'] /= 4
476
+ space2depth = 0
477
+ else:
478
+ block_args['s2d'] = space2depth
479
+
480
+ extract_features = False
481
+ if last_block:
482
+ next_stack_idx = stack_idx + 1
483
+ extract_features = next_stack_idx >= len(model_block_args) or \
484
+ model_block_args[next_stack_idx][0]['stride'] > 1
485
+
486
+ next_dilation = current_dilation
487
+ if block_args['stride'] > 1:
488
+ next_output_stride = current_stride * block_args['stride']
489
+ if next_output_stride > self.output_stride:
490
+ next_dilation = current_dilation * block_args['stride']
491
+ block_args['stride'] = 1
492
+ _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format(
493
+ self.output_stride), self.verbose)
494
+ else:
495
+ current_stride = next_output_stride
496
+ block_args['dilation'] = current_dilation
497
+ if next_dilation != current_dilation:
498
+ current_dilation = next_dilation
499
+
500
+ # create the block
501
+ block = self._make_block(block_args, total_block_idx, total_block_count)
502
+ blocks.append(block)
503
+
504
+ if space2depth == 1:
505
+ space2depth = 2
506
+
507
+ # stash feature module name and channel info for model feature extraction
508
+ if extract_features:
509
+ feature_info = dict(
510
+ stage=stack_idx + 1,
511
+ reduction=current_stride,
512
+ **block.feature_info(self.feature_location),
513
+ )
514
+ leaf_name = feature_info.get('module', '')
515
+ if leaf_name:
516
+ feature_info['module'] = '.'.join([f'blocks.{stack_idx}.{block_idx}', leaf_name])
517
+ else:
518
+ assert last_block
519
+ feature_info['module'] = f'blocks.{stack_idx}'
520
+ self.features.append(feature_info)
521
+
522
+ total_block_idx += 1 # incr global block idx (across all stacks)
523
+ stages.append(nn.Sequential(*blocks))
524
+ return stages
525
+
526
+
527
+ def _init_weight_goog(m, n='', fix_group_fanout=True):
528
+ """ Weight initialization as per Tensorflow official implementations.
529
+
530
+ Args:
531
+ m (nn.Module): module to init
532
+ n (str): module name
533
+ fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs
534
+
535
+ Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc:
536
+ * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py
537
+ * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py
538
+ """
539
+ if isinstance(m, CondConv2d):
540
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
541
+ if fix_group_fanout:
542
+ fan_out //= m.groups
543
+ init_weight_fn = get_condconv_initializer(
544
+ lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape)
545
+ init_weight_fn(m.weight)
546
+ if m.bias is not None:
547
+ nn.init.zeros_(m.bias)
548
+ elif isinstance(m, nn.Conv2d):
549
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
550
+ if fix_group_fanout:
551
+ fan_out //= m.groups
552
+ nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out))
553
+ if m.bias is not None:
554
+ nn.init.zeros_(m.bias)
555
+ elif isinstance(m, nn.BatchNorm2d):
556
+ nn.init.ones_(m.weight)
557
+ nn.init.zeros_(m.bias)
558
+ elif isinstance(m, nn.Linear):
559
+ fan_out = m.weight.size(0) # fan-out
560
+ fan_in = 0
561
+ if 'routing_fn' in n:
562
+ fan_in = m.weight.size(1)
563
+ init_range = 1.0 / math.sqrt(fan_in + fan_out)
564
+ nn.init.uniform_(m.weight, -init_range, init_range)
565
+ nn.init.zeros_(m.bias)
566
+
567
+
568
+ def efficientnet_init_weights(model: nn.Module, init_fn=None):
569
+ init_fn = init_fn or _init_weight_goog
570
+ for n, m in model.named_modules():
571
+ init_fn(m, n)
572
+
573
+ # iterate and call any module.init_weights() fn, children first
574
+ for n, m in named_modules(model):
575
+ if hasattr(m, 'init_weights'):
576
+ m.init_weights()
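A hedged sketch of decode_arch_def (defined above) on a tiny two-stage definition written in the block-string grammar documented in _decode_block_str; the stage strings are illustrative EfficientNet-style entries, not a real model config.

from timm.models._efficientnet_builder import decode_arch_def

arch_def = [
    ['ds_r1_k3_s1_e1_c16_se0.25'],   # stage 0: one depthwise-separable block, 16 output channels
    ['ir_r2_k3_s2_e6_c24_se0.25'],   # stage 1: two inverted-residual blocks, stride 2, 24 channels
]
block_args = decode_arch_def(arch_def, depth_multiplier=1.0)
print([len(stage) for stage in block_args])                          # [1, 2] blocks after repeat expansion
print(block_args[1][0]['block_type'], block_args[1][0]['out_chs'])   # 'ir' 24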
pytorch-image-models/timm/models/_helpers.py ADDED
@@ -0,0 +1,166 @@
1
+ """ Model creation / weight loading / state_dict helpers
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ import logging
6
+ import os
7
+ from typing import Any, Callable, Dict, Optional, Union
8
+
9
+ import torch
10
+ try:
11
+ import safetensors.torch
12
+ _has_safetensors = True
13
+ except ImportError:
14
+ _has_safetensors = False
15
+
16
+ _logger = logging.getLogger(__name__)
17
+
18
+ __all__ = ['clean_state_dict', 'load_state_dict', 'load_checkpoint', 'remap_state_dict', 'resume_checkpoint']
19
+
20
+
21
+ def _remove_prefix(text, prefix):
22
+ # FIXME replace with 3.9 stdlib fn when min at 3.9
23
+ if text.startswith(prefix):
24
+ return text[len(prefix):]
25
+ return text
26
+
27
+
28
+ def clean_state_dict(state_dict: Dict[str, Any]) -> Dict[str, Any]:
29
+ # 'clean' checkpoint by removing .module prefix from state dict if it exists from parallel training
30
+ cleaned_state_dict = {}
31
+ to_remove = (
32
+ 'module.', # DDP wrapper
33
+ '_orig_mod.', # torchcompile dynamo wrapper
34
+ )
35
+ for k, v in state_dict.items():
36
+ for r in to_remove:
37
+ k = _remove_prefix(k, r)
38
+ cleaned_state_dict[k] = v
39
+ return cleaned_state_dict
40
+
41
+
42
+ def load_state_dict(
43
+ checkpoint_path: str,
44
+ use_ema: bool = True,
45
+ device: Union[str, torch.device] = 'cpu',
46
+ weights_only: bool = False,
47
+ ) -> Dict[str, Any]:
48
+ if checkpoint_path and os.path.isfile(checkpoint_path):
49
+ # Check if safetensors or not and load weights accordingly
50
+ if str(checkpoint_path).endswith(".safetensors"):
51
+ assert _has_safetensors, "`pip install safetensors` to use .safetensors"
52
+ checkpoint = safetensors.torch.load_file(checkpoint_path, device=device)
53
+ else:
54
+ try:
55
+ checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=weights_only)
56
+ except TypeError:
57
+ checkpoint = torch.load(checkpoint_path, map_location=device)
58
+
59
+ state_dict_key = ''
60
+ if isinstance(checkpoint, dict):
61
+ if use_ema and checkpoint.get('state_dict_ema', None) is not None:
62
+ state_dict_key = 'state_dict_ema'
63
+ elif use_ema and checkpoint.get('model_ema', None) is not None:
64
+ state_dict_key = 'model_ema'
65
+ elif 'state_dict' in checkpoint:
66
+ state_dict_key = 'state_dict'
67
+ elif 'model' in checkpoint:
68
+ state_dict_key = 'model'
69
+ state_dict = clean_state_dict(checkpoint[state_dict_key] if state_dict_key else checkpoint)
70
+ _logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
71
+ return state_dict
72
+ else:
73
+ _logger.error("No checkpoint found at '{}'".format(checkpoint_path))
74
+ raise FileNotFoundError()
75
+
76
+
77
+ def load_checkpoint(
78
+ model: torch.nn.Module,
79
+ checkpoint_path: str,
80
+ use_ema: bool = True,
81
+ device: Union[str, torch.device] = 'cpu',
82
+ strict: bool = True,
83
+ remap: bool = False,
84
+ filter_fn: Optional[Callable] = None,
85
+ weights_only: bool = False,
86
+ ):
87
+ if os.path.splitext(checkpoint_path)[-1].lower() in ('.npz', '.npy'):
88
+ # numpy checkpoint, try to load via model specific load_pretrained fn
89
+ if hasattr(model, 'load_pretrained'):
90
+ model.load_pretrained(checkpoint_path)
91
+ else:
92
+ raise NotImplementedError('Model cannot load numpy checkpoint')
93
+ return
94
+
95
+ state_dict = load_state_dict(checkpoint_path, use_ema, device=device, weights_only=weights_only)
96
+ if remap:
97
+ state_dict = remap_state_dict(state_dict, model)
98
+ elif filter_fn:
99
+ state_dict = filter_fn(state_dict, model)
100
+ incompatible_keys = model.load_state_dict(state_dict, strict=strict)
101
+ return incompatible_keys
102
+
103
+
104
+ def remap_state_dict(
105
+ state_dict: Dict[str, Any],
106
+ model: torch.nn.Module,
107
+ allow_reshape: bool = True
108
+ ):
109
+ """ remap checkpoint by iterating over state dicts in order (ignoring original keys).
110
+ This assumes models (and originating state dict) were created with params registered in same order.
111
+ """
112
+ out_dict = {}
113
+ for (ka, va), (kb, vb) in zip(model.state_dict().items(), state_dict.items()):
114
+ assert va.numel() == vb.numel(), f'Tensor size mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.'
115
+ if va.shape != vb.shape:
116
+ if allow_reshape:
117
+ vb = vb.reshape(va.shape)
118
+ else:
119
+ assert False, f'Tensor shape mismatch {ka}: {va.shape} vs {kb}: {vb.shape}. Remap failed.'
120
+ out_dict[ka] = vb
121
+ return out_dict
122
+
123
+
124
+ def resume_checkpoint(
125
+ model: torch.nn.Module,
126
+ checkpoint_path: str,
127
+ optimizer: torch.optim.Optimizer = None,
128
+ loss_scaler: Any = None,
129
+ log_info: bool = True,
130
+ ):
131
+ resume_epoch = None
132
+ if os.path.isfile(checkpoint_path):
133
+ checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False)
134
+ if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
135
+ if log_info:
136
+ _logger.info('Restoring model state from checkpoint...')
137
+ state_dict = clean_state_dict(checkpoint['state_dict'])
138
+ model.load_state_dict(state_dict)
139
+
140
+ if optimizer is not None and 'optimizer' in checkpoint:
141
+ if log_info:
142
+ _logger.info('Restoring optimizer state from checkpoint...')
143
+ optimizer.load_state_dict(checkpoint['optimizer'])
144
+
145
+ if loss_scaler is not None and loss_scaler.state_dict_key in checkpoint:
146
+ if log_info:
147
+ _logger.info('Restoring AMP loss scaler state from checkpoint...')
148
+ loss_scaler.load_state_dict(checkpoint[loss_scaler.state_dict_key])
149
+
150
+ if 'epoch' in checkpoint:
151
+ resume_epoch = checkpoint['epoch']
152
+ if 'version' in checkpoint and checkpoint['version'] > 1:
153
+ resume_epoch += 1 # start at the next epoch, old checkpoints incremented before save
154
+
155
+ if log_info:
156
+ _logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
157
+ else:
158
+ model.load_state_dict(checkpoint)
159
+ if log_info:
160
+ _logger.info("Loaded checkpoint '{}'".format(checkpoint_path))
161
+ return resume_epoch
162
+ else:
163
+ _logger.error("No checkpoint found at '{}'".format(checkpoint_path))
164
+ raise FileNotFoundError()
165
+
166
+
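A brief sketch of the checkpoint helpers above; 'resnet18' is a real timm model, but the checkpoint paths are hypothetical and shown only to illustrate the call pattern.

import timm
import torch
from timm.models._helpers import load_checkpoint, resume_checkpoint

model = timm.create_model('resnet18', pretrained=False)

# load weights (preferring the EMA copy if present) from a training checkpoint
incompatible = load_checkpoint(model, 'output/train/model_best.pth.tar', use_ema=True, strict=False)
print(incompatible)

# or resume full training state and get the epoch to continue from
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
resume_epoch = resume_checkpoint(model, 'output/train/last.pth.tar', optimizer=optimizer)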
pytorch-image-models/timm/models/_manipulate.py ADDED
@@ -0,0 +1,278 @@
1
+ import collections.abc
2
+ import math
3
+ import re
4
+ from collections import defaultdict
5
+ from itertools import chain
6
+ from typing import Any, Callable, Dict, Iterator, Tuple, Type, Union
7
+
8
+ import torch
9
+ from torch import nn as nn
10
+ from torch.utils.checkpoint import checkpoint
11
+
12
+ __all__ = ['model_parameters', 'named_apply', 'named_modules', 'named_modules_with_params', 'adapt_input_conv',
13
+ 'group_with_matcher', 'group_modules', 'group_parameters', 'flatten_modules', 'checkpoint_seq']
14
+
15
+
16
+ def model_parameters(model: nn.Module, exclude_head: bool = False):
17
+ if exclude_head:
18
+ # FIXME this a bit of a quick and dirty hack to skip classifier head params based on ordering
19
+ return [p for p in model.parameters()][:-2]
20
+ else:
21
+ return model.parameters()
22
+
23
+
24
+ def named_apply(
25
+ fn: Callable,
26
+ module: nn.Module, name='',
27
+ depth_first: bool = True,
28
+ include_root: bool = False,
29
+ ) -> nn.Module:
30
+ if not depth_first and include_root:
31
+ fn(module=module, name=name)
32
+ for child_name, child_module in module.named_children():
33
+ child_name = '.'.join((name, child_name)) if name else child_name
34
+ named_apply(fn=fn, module=child_module, name=child_name, depth_first=depth_first, include_root=True)
35
+ if depth_first and include_root:
36
+ fn(module=module, name=name)
37
+ return module
38
+
39
+
40
+ def named_modules(
41
+ module: nn.Module,
42
+ name: str = '',
43
+ depth_first: bool = True,
44
+ include_root: bool = False,
45
+ ):
46
+ if not depth_first and include_root:
47
+ yield name, module
48
+ for child_name, child_module in module.named_children():
49
+ child_name = '.'.join((name, child_name)) if name else child_name
50
+ yield from named_modules(
51
+ module=child_module, name=child_name, depth_first=depth_first, include_root=True)
52
+ if depth_first and include_root:
53
+ yield name, module
54
+
55
+
56
+ def named_modules_with_params(
57
+ module: nn.Module,
58
+ name: str = '',
59
+ depth_first: bool = True,
60
+ include_root: bool = False,
61
+ ):
62
+ if module._parameters and not depth_first and include_root:
63
+ yield name, module
64
+ for child_name, child_module in module.named_children():
65
+ child_name = '.'.join((name, child_name)) if name else child_name
66
+ yield from named_modules_with_params(
67
+ module=child_module, name=child_name, depth_first=depth_first, include_root=True)
68
+ if module._parameters and depth_first and include_root:
69
+ yield name, module
70
+
71
+
72
+ MATCH_PREV_GROUP = (99999,)
73
+
74
+
75
+ def group_with_matcher(
76
+ named_objects: Iterator[Tuple[str, Any]],
77
+ group_matcher: Union[Dict, Callable],
78
+ return_values: bool = False,
79
+ reverse: bool = False
80
+ ):
81
+ if isinstance(group_matcher, dict):
82
+ # dictionary matcher contains a dict of raw-string regex expr that must be compiled
83
+ compiled = []
84
+ for group_ordinal, (group_name, mspec) in enumerate(group_matcher.items()):
85
+ if mspec is None:
86
+ continue
87
+ # map all matching specifications into 3-tuple (compiled re, prefix, suffix)
88
+ if isinstance(mspec, (tuple, list)):
89
+ # multi-entry match specifications require each sub-spec to be a 2-tuple (re, suffix)
90
+ for sspec in mspec:
91
+ compiled += [(re.compile(sspec[0]), (group_ordinal,), sspec[1])]
92
+ else:
93
+ compiled += [(re.compile(mspec), (group_ordinal,), None)]
94
+ group_matcher = compiled
95
+
96
+ def _get_grouping(name):
97
+ if isinstance(group_matcher, (list, tuple)):
98
+ for match_fn, prefix, suffix in group_matcher:
99
+ r = match_fn.match(name)
100
+ if r:
101
+ parts = (prefix, r.groups(), suffix)
102
+ # map all tuple elem to int for numeric sort, filter out None entries
103
+ return tuple(map(float, chain.from_iterable(filter(None, parts))))
104
+ return float('inf'), # un-matched layers (neck, head) mapped to largest ordinal
105
+ else:
106
+ ord = group_matcher(name)
107
+ if not isinstance(ord, collections.abc.Iterable):
108
+ return ord,
109
+ return tuple(ord)
110
+
111
+ # map layers into groups via ordinals (ints or tuples of ints) from matcher
112
+ grouping = defaultdict(list)
113
+ for k, v in named_objects:
114
+ grouping[_get_grouping(k)].append(v if return_values else k)
115
+
116
+ # remap to integers
117
+ layer_id_to_param = defaultdict(list)
118
+ lid = -1
119
+ for k in sorted(filter(lambda x: x is not None, grouping.keys())):
120
+ if lid < 0 or k[-1] != MATCH_PREV_GROUP[0]:
121
+ lid += 1
122
+ layer_id_to_param[lid].extend(grouping[k])
123
+
124
+ if reverse:
125
+ assert not return_values, "reverse mapping only sensible for name output"
126
+ # output reverse mapping
127
+ param_to_layer_id = {}
128
+ for lid, lm in layer_id_to_param.items():
129
+ for n in lm:
130
+ param_to_layer_id[n] = lid
131
+ return param_to_layer_id
132
+
133
+ return layer_id_to_param
134
+
135
+
136
+ def group_parameters(
137
+ module: nn.Module,
138
+ group_matcher,
139
+ return_values: bool = False,
140
+ reverse: bool = False,
141
+ ):
142
+ return group_with_matcher(
143
+ module.named_parameters(), group_matcher, return_values=return_values, reverse=reverse)
144
+
145
+
146
+ def group_modules(
147
+ module: nn.Module,
148
+ group_matcher,
149
+ return_values: bool = False,
150
+ reverse: bool = False,
151
+ ):
152
+ return group_with_matcher(
153
+ named_modules_with_params(module), group_matcher, return_values=return_values, reverse=reverse)
154
+
155
+
156
+ def flatten_modules(
157
+ named_modules: Iterator[Tuple[str, nn.Module]],
158
+ depth: int = 1,
159
+ prefix: Union[str, Tuple[str, ...]] = '',
160
+ module_types: Union[str, Tuple[Type[nn.Module]]] = 'sequential',
161
+ ):
162
+ prefix_is_tuple = isinstance(prefix, tuple)
163
+ if isinstance(module_types, str):
164
+ if module_types == 'container':
165
+ module_types = (nn.Sequential, nn.ModuleList, nn.ModuleDict)
166
+ else:
167
+ module_types = (nn.Sequential,)
168
+ for name, module in named_modules:
169
+ if depth and isinstance(module, module_types):
170
+ yield from flatten_modules(
171
+ module.named_children(),
172
+ depth - 1,
173
+ prefix=(name,) if prefix_is_tuple else name,
174
+ module_types=module_types,
175
+ )
176
+ else:
177
+ if prefix_is_tuple:
178
+ name = prefix + (name,)
179
+ yield name, module
180
+ else:
181
+ if prefix:
182
+ name = '.'.join([prefix, name])
183
+ yield name, module
184
+
185
+
186
+ def checkpoint_seq(
187
+ functions,
188
+ x,
189
+ every=1,
190
+ flatten=False,
191
+ skip_last=False,
192
+ preserve_rng_state=True
193
+ ):
194
+ r"""A helper function for checkpointing sequential models.
195
+
196
+ Sequential models execute a list of modules/functions in order
197
+ (sequentially). Therefore, we can divide such a sequence into segments
198
+ and checkpoint each segment. All segments except the last run in :func:`torch.no_grad`
199
+ manner, i.e., not storing the intermediate activations. The inputs of each
200
+ checkpointed segment will be saved for re-running the segment in the backward pass.
201
+
202
+ See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works.
203
+
204
+ .. warning::
205
+ Checkpointing currently only supports :func:`torch.autograd.backward`
206
+ and only if its `inputs` argument is not passed. :func:`torch.autograd.grad`
207
+ is not supported.
208
+
209
+ .. warning:
210
+ At least one of the inputs needs to have :code:`requires_grad=True` if
211
+ grads are needed for model inputs, otherwise the checkpointed part of the
212
+ model won't have gradients.
213
+
214
+ Args:
215
+ functions: A :class:`torch.nn.Sequential` or the list of modules or functions to run sequentially.
216
+ x: A Tensor that is input to :attr:`functions`
217
+ every: checkpoint every-n functions (default: 1)
218
+ flatten (bool): flatten nn.Sequential of nn.Sequentials
219
+ skip_last (bool): skip checkpointing the last function in the sequence if True
220
+ preserve_rng_state (bool, optional, default=True): stash and restore the RNG state
221
+ during each checkpoint; set False to omit this.
222
+
223
+ Returns:
224
+ Output of running :attr:`functions` sequentially on :attr:`*inputs`
225
+
226
+ Example:
227
+ >>> model = nn.Sequential(...)
228
+ >>> input_var = checkpoint_seq(model, input_var, every=2)
229
+ """
230
+ def run_function(start, end, functions):
231
+ def forward(_x):
232
+ for j in range(start, end + 1):
233
+ _x = functions[j](_x)
234
+ return _x
235
+ return forward
236
+
237
+ if isinstance(functions, torch.nn.Sequential):
238
+ functions = functions.children()
239
+ if flatten:
240
+ functions = chain.from_iterable(functions)
241
+ if not isinstance(functions, (tuple, list)):
242
+ functions = tuple(functions)
243
+
244
+ num_checkpointed = len(functions)
245
+ if skip_last:
246
+ num_checkpointed -= 1
247
+ end = -1
248
+ for start in range(0, num_checkpointed, every):
249
+ end = min(start + every - 1, num_checkpointed - 1)
250
+ x = checkpoint(run_function(start, end, functions), x, preserve_rng_state=preserve_rng_state)
251
+ if skip_last:
252
+ return run_function(end + 1, len(functions) - 1, functions)(x)
253
+ return x
254
+
255
+
256
+ def adapt_input_conv(in_chans, conv_weight):
257
+ conv_type = conv_weight.dtype
258
+ conv_weight = conv_weight.float() # Some weights are in torch.half, ensure it's float for sum on CPU
259
+ O, I, J, K = conv_weight.shape
260
+ if in_chans == 1:
261
+ if I > 3:
262
+ assert conv_weight.shape[1] % 3 == 0
263
+ # For models with space2depth stems
264
+ conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)
265
+ conv_weight = conv_weight.sum(dim=2, keepdim=False)
266
+ else:
267
+ conv_weight = conv_weight.sum(dim=1, keepdim=True)
268
+ elif in_chans != 3:
269
+ if I != 3:
270
+ raise NotImplementedError('Weight format not supported by conversion.')
271
+ else:
272
+ # NOTE this strategy should be better than random init, but there could be other combinations of
273
+ # the original RGB input layer weights that'd work better for specific cases.
274
+ repeat = int(math.ceil(in_chans / 3))
275
+ conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]
276
+ conv_weight *= (3 / float(in_chans))
277
+ conv_weight = conv_weight.to(conv_type)
278
+ return conv_weight
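A small sketch of adapt_input_conv from above, converting an RGB stem convolution weight for 1-channel and 6-channel inputs; the random tensor stands in for a real pretrained weight.

import torch
from timm.models._manipulate import adapt_input_conv

rgb_weight = torch.randn(64, 3, 7, 7)            # (out_chs, in_chs=3, kH, kW) stem conv weight
gray_weight = adapt_input_conv(1, rgb_weight)    # channels summed -> torch.Size([64, 1, 7, 7])
six_weight = adapt_input_conv(6, rgb_weight)     # tiled and rescaled -> torch.Size([64, 6, 7, 7])
print(gray_weight.shape, six_weight.shape)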
pytorch-image-models/timm/models/_pretrained.py ADDED
@@ -0,0 +1,94 @@
1
+ import copy
2
+ from collections import deque, defaultdict
3
+ from dataclasses import dataclass, field, replace, asdict
4
+ from typing import Any, Deque, Dict, Tuple, Optional, Union
5
+
6
+
7
+ __all__ = ['PretrainedCfg', 'filter_pretrained_cfg', 'DefaultCfg']
8
+
9
+
10
+ @dataclass
11
+ class PretrainedCfg:
12
+ """
13
+ """
14
+ # weight source locations
15
+ url: Optional[Union[str, Tuple[str, str]]] = None # remote URL
16
+ file: Optional[str] = None # local / shared filesystem path
17
+ state_dict: Optional[Dict[str, Any]] = None # in-memory state dict
18
+ hf_hub_id: Optional[str] = None # Hugging Face Hub model id ('organization/model')
19
+ hf_hub_filename: Optional[str] = None # Hugging Face Hub filename (overrides default)
20
+
21
+ source: Optional[str] = None # source of cfg / weight location used (url, file, hf-hub)
22
+ architecture: Optional[str] = None # architecture variant can be set when not implicit
23
+ tag: Optional[str] = None # pretrained tag of source
24
+ custom_load: bool = False # use custom model specific model.load_pretrained() (ie for npz files)
25
+
26
+ # input / data config
27
+ input_size: Tuple[int, int, int] = (3, 224, 224)
28
+ test_input_size: Optional[Tuple[int, int, int]] = None
29
+ min_input_size: Optional[Tuple[int, int, int]] = None
30
+ fixed_input_size: bool = False
31
+ interpolation: str = 'bicubic'
32
+ crop_pct: float = 0.875
33
+ test_crop_pct: Optional[float] = None
34
+ crop_mode: str = 'center'
35
+ mean: Tuple[float, ...] = (0.485, 0.456, 0.406)
36
+ std: Tuple[float, ...] = (0.229, 0.224, 0.225)
37
+
38
+ # head / classifier config and meta-data
39
+ num_classes: int = 1000
40
+ label_offset: Optional[int] = None
41
+ label_names: Optional[Tuple[str]] = None
42
+ label_descriptions: Optional[Dict[str, str]] = None
43
+
44
+ # model attributes that vary with above or required for pretrained adaptation
45
+ pool_size: Optional[Tuple[int, ...]] = None
46
+ test_pool_size: Optional[Tuple[int, ...]] = None
47
+ first_conv: Optional[str] = None
48
+ classifier: Optional[str] = None
49
+
50
+ license: Optional[str] = None
51
+ description: Optional[str] = None
52
+ origin_url: Optional[str] = None
53
+ paper_name: Optional[str] = None
54
+ paper_ids: Optional[Union[str, Tuple[str]]] = None
55
+ notes: Optional[Tuple[str]] = None
56
+
57
+ @property
58
+ def has_weights(self):
59
+ return self.url or self.file or self.hf_hub_id
60
+
61
+ def to_dict(self, remove_source=False, remove_null=True):
62
+ return filter_pretrained_cfg(
63
+ asdict(self),
64
+ remove_source=remove_source,
65
+ remove_null=remove_null
66
+ )
67
+
68
+
69
+ def filter_pretrained_cfg(cfg, remove_source=False, remove_null=True):
70
+ filtered_cfg = {}
71
+ keep_null = {'pool_size', 'first_conv', 'classifier'} # always keep these keys, even if none
72
+ for k, v in cfg.items():
73
+ if remove_source and k in {'url', 'file', 'hf_hub_id', 'hf_hub_filename', 'source'}:
74
+ continue
75
+ if remove_null and v is None and k not in keep_null:
76
+ continue
77
+ filtered_cfg[k] = v
78
+ return filtered_cfg
79
+
80
+
81
+ @dataclass
82
+ class DefaultCfg:
83
+ tags: Deque[str] = field(default_factory=deque) # priority queue of tags (first is default)
84
+ cfgs: Dict[str, PretrainedCfg] = field(default_factory=dict) # pretrained cfgs by tag
85
+ is_pretrained: bool = False # at least one of the configs has a pretrained source set
86
+
87
+ @property
88
+ def default(self):
89
+ return self.cfgs[self.tags[0]]
90
+
91
+ @property
92
+ def default_with_tag(self):
93
+ tag = self.tags[0]
94
+ return tag, self.cfgs[tag]
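A hedged sketch of how PretrainedCfg and DefaultCfg (above) fit together; the hub id and tag are hypothetical placeholders, not real weight locations.

from collections import deque
from timm.models._pretrained import PretrainedCfg, DefaultCfg

cfg = PretrainedCfg(
    hf_hub_id='my-org/my-model',        # hypothetical Hugging Face Hub id
    input_size=(3, 256, 256),
    crop_pct=0.95,
)
print(bool(cfg.has_weights))            # True: a weight source (hf_hub_id) is set
print(cfg.to_dict(remove_source=True))  # null fields and source keys filtered out

default = DefaultCfg(tags=deque(['in1k']), cfgs={'in1k': cfg}, is_pretrained=True)
print(default.default_with_tag)         # ('in1k', PretrainedCfg(...))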
pytorch-image-models/timm/models/_prune.py ADDED
@@ -0,0 +1,116 @@
1
+ import os
2
+ import pkgutil
3
+ from copy import deepcopy
4
+
5
+ from torch import nn as nn
6
+
7
+ from timm.layers import Conv2dSame, BatchNormAct2d, Linear
8
+
9
+ __all__ = ['extract_layer', 'set_layer', 'adapt_model_from_string', 'adapt_model_from_file']
10
+
11
+
12
+ def extract_layer(model, layer):
13
+ layer = layer.split('.')
14
+ module = model
15
+ if hasattr(model, 'module') and layer[0] != 'module':
16
+ module = model.module
17
+ if not hasattr(model, 'module') and layer[0] == 'module':
18
+ layer = layer[1:]
19
+ for l in layer:
20
+ if hasattr(module, l):
21
+ if not l.isdigit():
22
+ module = getattr(module, l)
23
+ else:
24
+ module = module[int(l)]
25
+ else:
26
+ return module
27
+ return module
28
+
29
+
30
+ def set_layer(model, layer, val):
31
+ layer = layer.split('.')
32
+ module = model
33
+ if hasattr(model, 'module') and layer[0] != 'module':
34
+ module = model.module
35
+ lst_index = 0
36
+ module2 = module
37
+ for l in layer:
38
+ if hasattr(module2, l):
39
+ if not l.isdigit():
40
+ module2 = getattr(module2, l)
41
+ else:
42
+ module2 = module2[int(l)]
43
+ lst_index += 1
44
+ lst_index -= 1
45
+ for l in layer[:lst_index]:
46
+ if not l.isdigit():
47
+ module = getattr(module, l)
48
+ else:
49
+ module = module[int(l)]
50
+ l = layer[lst_index]
51
+ setattr(module, l, val)
52
+
53
+
54
+ def adapt_model_from_string(parent_module, model_string):
55
+ separator = '***'
56
+ state_dict = {}
57
+ lst_shape = model_string.split(separator)
58
+ for k in lst_shape:
59
+ k = k.split(':')
60
+ key = k[0]
61
+ shape = k[1][1:-1].split(',')
62
+ if shape[0] != '':
63
+ state_dict[key] = [int(i) for i in shape]
64
+
65
+ new_module = deepcopy(parent_module)
66
+ for n, m in parent_module.named_modules():
67
+ old_module = extract_layer(parent_module, n)
68
+ if isinstance(old_module, nn.Conv2d) or isinstance(old_module, Conv2dSame):
69
+ if isinstance(old_module, Conv2dSame):
70
+ conv = Conv2dSame
71
+ else:
72
+ conv = nn.Conv2d
73
+ s = state_dict[n + '.weight']
74
+ in_channels = s[1]
75
+ out_channels = s[0]
76
+ g = 1
77
+ if old_module.groups > 1:
78
+ in_channels = out_channels
79
+ g = in_channels
80
+ new_conv = conv(
81
+ in_channels=in_channels, out_channels=out_channels, kernel_size=old_module.kernel_size,
82
+ bias=old_module.bias is not None, padding=old_module.padding, dilation=old_module.dilation,
83
+ groups=g, stride=old_module.stride)
84
+ set_layer(new_module, n, new_conv)
85
+ elif isinstance(old_module, BatchNormAct2d):
86
+ new_bn = BatchNormAct2d(
87
+ state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
88
+ affine=old_module.affine, track_running_stats=True)
89
+ new_bn.drop = old_module.drop
90
+ new_bn.act = old_module.act
91
+ set_layer(new_module, n, new_bn)
92
+ elif isinstance(old_module, nn.BatchNorm2d):
93
+ new_bn = nn.BatchNorm2d(
94
+ num_features=state_dict[n + '.weight'][0], eps=old_module.eps, momentum=old_module.momentum,
95
+ affine=old_module.affine, track_running_stats=True)
96
+ set_layer(new_module, n, new_bn)
97
+ elif isinstance(old_module, nn.Linear):
98
+ # FIXME extra checks to ensure this is actually the FC classifier layer and not a diff Linear layer?
99
+ num_features = state_dict[n + '.weight'][1]
100
+ new_fc = Linear(
101
+ in_features=num_features, out_features=old_module.out_features, bias=old_module.bias is not None)
102
+ set_layer(new_module, n, new_fc)
103
+ if hasattr(new_module, 'num_features'):
104
+ if getattr(new_module, 'head_hidden_size', 0) == new_module.num_features:
105
+ new_module.head_hidden_size = num_features
106
+ new_module.num_features = num_features
107
+
108
+ new_module.eval()
109
+ parent_module.eval()
110
+
111
+ return new_module
112
+
113
+
114
+ def adapt_model_from_file(parent_module, model_variant):
115
+ adapt_data = pkgutil.get_data(__name__, os.path.join('_pruned', model_variant + '.txt'))
116
+ return adapt_model_from_string(parent_module, adapt_data.decode('utf-8').strip())
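A short illustrative sketch (not part of this commit) of the dotted-path helpers above, applied to a toy nn.Sequential; the layers and shapes here are hypothetical. adapt_model_from_string() then uses the same helpers to rebuild pruned layers from a '***'-separated "name:[shape]" description.

    # Sketch only: navigate and replace submodules by dotted path.
    import torch.nn as nn
    from timm.models._prune import extract_layer, set_layer

    toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    conv = extract_layer(toy, '0')            # digit path components index into containers
    assert isinstance(conv, nn.Conv2d)

    set_layer(toy, '0', nn.Conv2d(3, 4, 3))   # swap the conv for a narrower (pruned) one
    assert extract_layer(toy, '0').out_channels == 4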
pytorch-image-models/timm/models/_pruned/efficientnet_b1_pruned.txt ADDED
@@ -0,0 +1 @@
1
+ conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.bias:[32]***bn1.running_mean:[32]***bn1.running_var:[32]***bn1.num_batches_tracked:[]***blocks.0.0.conv_dw.weight:[32, 1, 3, 3]***blocks.0.0.bn1.weight:[32]***blocks.0.0.bn1.bias:[32]***blocks.0.0.bn1.running_mean:[32]***blocks.0.0.bn1.running_var:[32]***blocks.0.0.bn1.num_batches_tracked:[]***blocks.0.0.se.conv_reduce.weight:[8, 32, 1, 1]***blocks.0.0.se.conv_reduce.bias:[8]***blocks.0.0.se.conv_expand.weight:[32, 8, 1, 1]***blocks.0.0.se.conv_expand.bias:[32]***blocks.0.0.conv_pw.weight:[16, 32, 1, 1]***blocks.0.0.bn2.weight:[16]***blocks.0.0.bn2.bias:[16]***blocks.0.0.bn2.running_mean:[16]***blocks.0.0.bn2.running_var:[16]***blocks.0.0.bn2.num_batches_tracked:[]***blocks.0.1.conv_dw.weight:[16, 1, 3, 3]***blocks.0.1.bn1.weight:[16]***blocks.0.1.bn1.bias:[16]***blocks.0.1.bn1.running_mean:[16]***blocks.0.1.bn1.running_var:[16]***blocks.0.1.bn1.num_batches_tracked:[]***blocks.0.1.se.conv_reduce.weight:[4, 16, 1, 1]***blocks.0.1.se.conv_reduce.bias:[4]***blocks.0.1.se.conv_expand.weight:[16, 4, 1, 1]***blocks.0.1.se.conv_expand.bias:[16]***blocks.0.1.conv_pw.weight:[16, 16, 1, 1]***blocks.0.1.bn2.weight:[16]***blocks.0.1.bn2.bias:[16]***blocks.0.1.bn2.running_mean:[16]***blocks.0.1.bn2.running_var:[16]***blocks.0.1.bn2.num_batches_tracked:[]***blocks.1.0.conv_pw.weight:[48, 16, 1, 1]***blocks.1.0.bn1.weight:[48]***blocks.1.0.bn1.bias:[48]***blocks.1.0.bn1.running_mean:[48]***blocks.1.0.bn1.running_var:[48]***blocks.1.0.bn1.num_batches_tracked:[]***blocks.1.0.conv_dw.weight:[48, 1, 3, 3]***blocks.1.0.bn2.weight:[48]***blocks.1.0.bn2.bias:[48]***blocks.1.0.bn2.running_mean:[48]***blocks.1.0.bn2.running_var:[48]***blocks.1.0.bn2.num_batches_tracked:[]***blocks.1.0.se.conv_reduce.weight:[4, 48, 1, 1]***blocks.1.0.se.conv_reduce.bias:[4]***blocks.1.0.se.conv_expand.weight:[48, 4, 1, 1]***blocks.1.0.se.conv_expand.bias:[48]***blocks.1.0.conv_pwl.weight:[12, 48, 1, 1]***blocks.1.0.bn3.weight:[12]***blocks.1.0.bn3.bias:[12]***blocks.1.0.bn3.running_mean:[12]***blocks.1.0.bn3.running_var:[12]***blocks.1.0.bn3.num_batches_tracked:[]***blocks.1.1.conv_pw.weight:[62, 12, 1, 1]***blocks.1.1.bn1.weight:[62]***blocks.1.1.bn1.bias:[62]***blocks.1.1.bn1.running_mean:[62]***blocks.1.1.bn1.running_var:[62]***blocks.1.1.bn1.num_batches_tracked:[]***blocks.1.1.conv_dw.weight:[62, 1, 3, 3]***blocks.1.1.bn2.weight:[62]***blocks.1.1.bn2.bias:[62]***blocks.1.1.bn2.running_mean:[62]***blocks.1.1.bn2.running_var:[62]***blocks.1.1.bn2.num_batches_tracked:[]***blocks.1.1.se.conv_reduce.weight:[6, 62, 1, 1]***blocks.1.1.se.conv_reduce.bias:[6]***blocks.1.1.se.conv_expand.weight:[62, 6, 1, 1]***blocks.1.1.se.conv_expand.bias:[62]***blocks.1.1.conv_pwl.weight:[12, 62, 1, 1]***blocks.1.1.bn3.weight:[12]***blocks.1.1.bn3.bias:[12]***blocks.1.1.bn3.running_mean:[12]***blocks.1.1.bn3.running_var:[12]***blocks.1.1.bn3.num_batches_tracked:[]***blocks.1.2.conv_pw.weight:[48, 12, 1, 1]***blocks.1.2.bn1.weight:[48]***blocks.1.2.bn1.bias:[48]***blocks.1.2.bn1.running_mean:[48]***blocks.1.2.bn1.running_var:[48]***blocks.1.2.bn1.num_batches_tracked:[]***blocks.1.2.conv_dw.weight:[48, 1, 3, 3]***blocks.1.2.bn2.weight:[48]***blocks.1.2.bn2.bias:[48]***blocks.1.2.bn2.running_mean:[48]***blocks.1.2.bn2.running_var:[48]***blocks.1.2.bn2.num_batches_tracked:[]***blocks.1.2.se.conv_reduce.weight:[6, 48, 1, 1]***blocks.1.2.se.conv_reduce.bias:[6]***blocks.1.2.se.conv_expand.weight:[48, 6, 1, 1]***blocks.1.2.se.conv_expand.bias:[48]***blocks.1.2.conv_pwl.weight:[12, 48, 1, 
1]***blocks.1.2.bn3.weight:[12]***blocks.1.2.bn3.bias:[12]***blocks.1.2.bn3.running_mean:[12]***blocks.1.2.bn3.running_var:[12]***blocks.1.2.bn3.num_batches_tracked:[]***blocks.2.0.conv_pw.weight:[70, 12, 1, 1]***blocks.2.0.bn1.weight:[70]***blocks.2.0.bn1.bias:[70]***blocks.2.0.bn1.running_mean:[70]***blocks.2.0.bn1.running_var:[70]***blocks.2.0.bn1.num_batches_tracked:[]***blocks.2.0.conv_dw.weight:[70, 1, 5, 5]***blocks.2.0.bn2.weight:[70]***blocks.2.0.bn2.bias:[70]***blocks.2.0.bn2.running_mean:[70]***blocks.2.0.bn2.running_var:[70]***blocks.2.0.bn2.num_batches_tracked:[]***blocks.2.0.se.conv_reduce.weight:[6, 70, 1, 1]***blocks.2.0.se.conv_reduce.bias:[6]***blocks.2.0.se.conv_expand.weight:[70, 6, 1, 1]***blocks.2.0.se.conv_expand.bias:[70]***blocks.2.0.conv_pwl.weight:[35, 70, 1, 1]***blocks.2.0.bn3.weight:[35]***blocks.2.0.bn3.bias:[35]***blocks.2.0.bn3.running_mean:[35]***blocks.2.0.bn3.running_var:[35]***blocks.2.0.bn3.num_batches_tracked:[]***blocks.2.1.conv_pw.weight:[61, 35, 1, 1]***blocks.2.1.bn1.weight:[61]***blocks.2.1.bn1.bias:[61]***blocks.2.1.bn1.running_mean:[61]***blocks.2.1.bn1.running_var:[61]***blocks.2.1.bn1.num_batches_tracked:[]***blocks.2.1.conv_dw.weight:[61, 1, 5, 5]***blocks.2.1.bn2.weight:[61]***blocks.2.1.bn2.bias:[61]***blocks.2.1.bn2.running_mean:[61]***blocks.2.1.bn2.running_var:[61]***blocks.2.1.bn2.num_batches_tracked:[]***blocks.2.1.se.conv_reduce.weight:[10, 61, 1, 1]***blocks.2.1.se.conv_reduce.bias:[10]***blocks.2.1.se.conv_expand.weight:[61, 10, 1, 1]***blocks.2.1.se.conv_expand.bias:[61]***blocks.2.1.conv_pwl.weight:[35, 61, 1, 1]***blocks.2.1.bn3.weight:[35]***blocks.2.1.bn3.bias:[35]***blocks.2.1.bn3.running_mean:[35]***blocks.2.1.bn3.running_var:[35]***blocks.2.1.bn3.num_batches_tracked:[]***blocks.2.2.conv_pw.weight:[51, 35, 1, 1]***blocks.2.2.bn1.weight:[51]***blocks.2.2.bn1.bias:[51]***blocks.2.2.bn1.running_mean:[51]***blocks.2.2.bn1.running_var:[51]***blocks.2.2.bn1.num_batches_tracked:[]***blocks.2.2.conv_dw.weight:[51, 1, 5, 5]***blocks.2.2.bn2.weight:[51]***blocks.2.2.bn2.bias:[51]***blocks.2.2.bn2.running_mean:[51]***blocks.2.2.bn2.running_var:[51]***blocks.2.2.bn2.num_batches_tracked:[]***blocks.2.2.se.conv_reduce.weight:[10, 51, 1, 1]***blocks.2.2.se.conv_reduce.bias:[10]***blocks.2.2.se.conv_expand.weight:[51, 10, 1, 1]***blocks.2.2.se.conv_expand.bias:[51]***blocks.2.2.conv_pwl.weight:[35, 51, 1, 1]***blocks.2.2.bn3.weight:[35]***blocks.2.2.bn3.bias:[35]***blocks.2.2.bn3.running_mean:[35]***blocks.2.2.bn3.running_var:[35]***blocks.2.2.bn3.num_batches_tracked:[]***blocks.3.0.conv_pw.weight:[175, 35, 1, 1]***blocks.3.0.bn1.weight:[175]***blocks.3.0.bn1.bias:[175]***blocks.3.0.bn1.running_mean:[175]***blocks.3.0.bn1.running_var:[175]***blocks.3.0.bn1.num_batches_tracked:[]***blocks.3.0.conv_dw.weight:[175, 1, 3, 3]***blocks.3.0.bn2.weight:[175]***blocks.3.0.bn2.bias:[175]***blocks.3.0.bn2.running_mean:[175]***blocks.3.0.bn2.running_var:[175]***blocks.3.0.bn2.num_batches_tracked:[]***blocks.3.0.se.conv_reduce.weight:[10, 175, 1, 1]***blocks.3.0.se.conv_reduce.bias:[10]***blocks.3.0.se.conv_expand.weight:[175, 10, 1, 1]***blocks.3.0.se.conv_expand.bias:[175]***blocks.3.0.conv_pwl.weight:[74, 175, 1, 1]***blocks.3.0.bn3.weight:[74]***blocks.3.0.bn3.bias:[74]***blocks.3.0.bn3.running_mean:[74]***blocks.3.0.bn3.running_var:[74]***blocks.3.0.bn3.num_batches_tracked:[]***blocks.3.1.conv_pw.weight:[188, 74, 1, 
1]***blocks.3.1.bn1.weight:[188]***blocks.3.1.bn1.bias:[188]***blocks.3.1.bn1.running_mean:[188]***blocks.3.1.bn1.running_var:[188]***blocks.3.1.bn1.num_batches_tracked:[]***blocks.3.1.conv_dw.weight:[188, 1, 3, 3]***blocks.3.1.bn2.weight:[188]***blocks.3.1.bn2.bias:[188]***blocks.3.1.bn2.running_mean:[188]***blocks.3.1.bn2.running_var:[188]***blocks.3.1.bn2.num_batches_tracked:[]***blocks.3.1.se.conv_reduce.weight:[20, 188, 1, 1]***blocks.3.1.se.conv_reduce.bias:[20]***blocks.3.1.se.conv_expand.weight:[188, 20, 1, 1]***blocks.3.1.se.conv_expand.bias:[188]***blocks.3.1.conv_pwl.weight:[74, 188, 1, 1]***blocks.3.1.bn3.weight:[74]***blocks.3.1.bn3.bias:[74]***blocks.3.1.bn3.running_mean:[74]***blocks.3.1.bn3.running_var:[74]***blocks.3.1.bn3.num_batches_tracked:[]***blocks.3.2.conv_pw.weight:[137, 74, 1, 1]***blocks.3.2.bn1.weight:[137]***blocks.3.2.bn1.bias:[137]***blocks.3.2.bn1.running_mean:[137]***blocks.3.2.bn1.running_var:[137]***blocks.3.2.bn1.num_batches_tracked:[]***blocks.3.2.conv_dw.weight:[137, 1, 3, 3]***blocks.3.2.bn2.weight:[137]***blocks.3.2.bn2.bias:[137]***blocks.3.2.bn2.running_mean:[137]***blocks.3.2.bn2.running_var:[137]***blocks.3.2.bn2.num_batches_tracked:[]***blocks.3.2.se.conv_reduce.weight:[20, 137, 1, 1]***blocks.3.2.se.conv_reduce.bias:[20]***blocks.3.2.se.conv_expand.weight:[137, 20, 1, 1]***blocks.3.2.se.conv_expand.bias:[137]***blocks.3.2.conv_pwl.weight:[74, 137, 1, 1]***blocks.3.2.bn3.weight:[74]***blocks.3.2.bn3.bias:[74]***blocks.3.2.bn3.running_mean:[74]***blocks.3.2.bn3.running_var:[74]***blocks.3.2.bn3.num_batches_tracked:[]***blocks.3.3.conv_pw.weight:[164, 74, 1, 1]***blocks.3.3.bn1.weight:[164]***blocks.3.3.bn1.bias:[164]***blocks.3.3.bn1.running_mean:[164]***blocks.3.3.bn1.running_var:[164]***blocks.3.3.bn1.num_batches_tracked:[]***blocks.3.3.conv_dw.weight:[164, 1, 3, 3]***blocks.3.3.bn2.weight:[164]***blocks.3.3.bn2.bias:[164]***blocks.3.3.bn2.running_mean:[164]***blocks.3.3.bn2.running_var:[164]***blocks.3.3.bn2.num_batches_tracked:[]***blocks.3.3.se.conv_reduce.weight:[20, 164, 1, 1]***blocks.3.3.se.conv_reduce.bias:[20]***blocks.3.3.se.conv_expand.weight:[164, 20, 1, 1]***blocks.3.3.se.conv_expand.bias:[164]***blocks.3.3.conv_pwl.weight:[74, 164, 1, 1]***blocks.3.3.bn3.weight:[74]***blocks.3.3.bn3.bias:[74]***blocks.3.3.bn3.running_mean:[74]***blocks.3.3.bn3.running_var:[74]***blocks.3.3.bn3.num_batches_tracked:[]***blocks.4.0.conv_pw.weight:[399, 74, 1, 1]***blocks.4.0.bn1.weight:[399]***blocks.4.0.bn1.bias:[399]***blocks.4.0.bn1.running_mean:[399]***blocks.4.0.bn1.running_var:[399]***blocks.4.0.bn1.num_batches_tracked:[]***blocks.4.0.conv_dw.weight:[399, 1, 5, 5]***blocks.4.0.bn2.weight:[399]***blocks.4.0.bn2.bias:[399]***blocks.4.0.bn2.running_mean:[399]***blocks.4.0.bn2.running_var:[399]***blocks.4.0.bn2.num_batches_tracked:[]***blocks.4.0.se.conv_reduce.weight:[20, 399, 1, 1]***blocks.4.0.se.conv_reduce.bias:[20]***blocks.4.0.se.conv_expand.weight:[399, 20, 1, 1]***blocks.4.0.se.conv_expand.bias:[399]***blocks.4.0.conv_pwl.weight:[67, 399, 1, 1]***blocks.4.0.bn3.weight:[67]***blocks.4.0.bn3.bias:[67]***blocks.4.0.bn3.running_mean:[67]***blocks.4.0.bn3.running_var:[67]***blocks.4.0.bn3.num_batches_tracked:[]***blocks.4.1.conv_pw.weight:[201, 67, 1, 1]***blocks.4.1.bn1.weight:[201]***blocks.4.1.bn1.bias:[201]***blocks.4.1.bn1.running_mean:[201]***blocks.4.1.bn1.running_var:[201]***blocks.4.1.bn1.num_batches_tracked:[]***blocks.4.1.conv_dw.weight:[201, 1, 5, 
5]***blocks.4.1.bn2.weight:[201]***blocks.4.1.bn2.bias:[201]***blocks.4.1.bn2.running_mean:[201]***blocks.4.1.bn2.running_var:[201]***blocks.4.1.bn2.num_batches_tracked:[]***blocks.4.1.se.conv_reduce.weight:[28, 201, 1, 1]***blocks.4.1.se.conv_reduce.bias:[28]***blocks.4.1.se.conv_expand.weight:[201, 28, 1, 1]***blocks.4.1.se.conv_expand.bias:[201]***blocks.4.1.conv_pwl.weight:[67, 201, 1, 1]***blocks.4.1.bn3.weight:[67]***blocks.4.1.bn3.bias:[67]***blocks.4.1.bn3.running_mean:[67]***blocks.4.1.bn3.running_var:[67]***blocks.4.1.bn3.num_batches_tracked:[]***blocks.4.2.conv_pw.weight:[160, 67, 1, 1]***blocks.4.2.bn1.weight:[160]***blocks.4.2.bn1.bias:[160]***blocks.4.2.bn1.running_mean:[160]***blocks.4.2.bn1.running_var:[160]***blocks.4.2.bn1.num_batches_tracked:[]***blocks.4.2.conv_dw.weight:[160, 1, 5, 5]***blocks.4.2.bn2.weight:[160]***blocks.4.2.bn2.bias:[160]***blocks.4.2.bn2.running_mean:[160]***blocks.4.2.bn2.running_var:[160]***blocks.4.2.bn2.num_batches_tracked:[]***blocks.4.2.se.conv_reduce.weight:[28, 160, 1, 1]***blocks.4.2.se.conv_reduce.bias:[28]***blocks.4.2.se.conv_expand.weight:[160, 28, 1, 1]***blocks.4.2.se.conv_expand.bias:[160]***blocks.4.2.conv_pwl.weight:[67, 160, 1, 1]***blocks.4.2.bn3.weight:[67]***blocks.4.2.bn3.bias:[67]***blocks.4.2.bn3.running_mean:[67]***blocks.4.2.bn3.running_var:[67]***blocks.4.2.bn3.num_batches_tracked:[]***blocks.4.3.conv_pw.weight:[213, 67, 1, 1]***blocks.4.3.bn1.weight:[213]***blocks.4.3.bn1.bias:[213]***blocks.4.3.bn1.running_mean:[213]***blocks.4.3.bn1.running_var:[213]***blocks.4.3.bn1.num_batches_tracked:[]***blocks.4.3.conv_dw.weight:[213, 1, 5, 5]***blocks.4.3.bn2.weight:[213]***blocks.4.3.bn2.bias:[213]***blocks.4.3.bn2.running_mean:[213]***blocks.4.3.bn2.running_var:[213]***blocks.4.3.bn2.num_batches_tracked:[]***blocks.4.3.se.conv_reduce.weight:[28, 213, 1, 1]***blocks.4.3.se.conv_reduce.bias:[28]***blocks.4.3.se.conv_expand.weight:[213, 28, 1, 1]***blocks.4.3.se.conv_expand.bias:[213]***blocks.4.3.conv_pwl.weight:[67, 213, 1, 1]***blocks.4.3.bn3.weight:[67]***blocks.4.3.bn3.bias:[67]***blocks.4.3.bn3.running_mean:[67]***blocks.4.3.bn3.running_var:[67]***blocks.4.3.bn3.num_batches_tracked:[]***blocks.5.0.conv_pw.weight:[637, 67, 1, 1]***blocks.5.0.bn1.weight:[637]***blocks.5.0.bn1.bias:[637]***blocks.5.0.bn1.running_mean:[637]***blocks.5.0.bn1.running_var:[637]***blocks.5.0.bn1.num_batches_tracked:[]***blocks.5.0.conv_dw.weight:[637, 1, 5, 5]***blocks.5.0.bn2.weight:[637]***blocks.5.0.bn2.bias:[637]***blocks.5.0.bn2.running_mean:[637]***blocks.5.0.bn2.running_var:[637]***blocks.5.0.bn2.num_batches_tracked:[]***blocks.5.0.se.conv_reduce.weight:[27, 637, 1, 1]***blocks.5.0.se.conv_reduce.bias:[27]***blocks.5.0.se.conv_expand.weight:[637, 27, 1, 1]***blocks.5.0.se.conv_expand.bias:[637]***blocks.5.0.conv_pwl.weight:[192, 637, 1, 1]***blocks.5.0.bn3.weight:[192]***blocks.5.0.bn3.bias:[192]***blocks.5.0.bn3.running_mean:[192]***blocks.5.0.bn3.running_var:[192]***blocks.5.0.bn3.num_batches_tracked:[]***blocks.5.1.conv_pw.weight:[806, 192, 1, 1]***blocks.5.1.bn1.weight:[806]***blocks.5.1.bn1.bias:[806]***blocks.5.1.bn1.running_mean:[806]***blocks.5.1.bn1.running_var:[806]***blocks.5.1.bn1.num_batches_tracked:[]***blocks.5.1.conv_dw.weight:[806, 1, 5, 5]***blocks.5.1.bn2.weight:[806]***blocks.5.1.bn2.bias:[806]***blocks.5.1.bn2.running_mean:[806]***blocks.5.1.bn2.running_var:[806]***blocks.5.1.bn2.num_batches_tracked:[]***blocks.5.1.se.conv_reduce.weight:[48, 806, 1, 
1]***blocks.5.1.se.conv_reduce.bias:[48]***blocks.5.1.se.conv_expand.weight:[806, 48, 1, 1]***blocks.5.1.se.conv_expand.bias:[806]***blocks.5.1.conv_pwl.weight:[192, 806, 1, 1]***blocks.5.1.bn3.weight:[192]***blocks.5.1.bn3.bias:[192]***blocks.5.1.bn3.running_mean:[192]***blocks.5.1.bn3.running_var:[192]***blocks.5.1.bn3.num_batches_tracked:[]***blocks.5.2.conv_pw.weight:[798, 192, 1, 1]***blocks.5.2.bn1.weight:[798]***blocks.5.2.bn1.bias:[798]***blocks.5.2.bn1.running_mean:[798]***blocks.5.2.bn1.running_var:[798]***blocks.5.2.bn1.num_batches_tracked:[]***blocks.5.2.conv_dw.weight:[798, 1, 5, 5]***blocks.5.2.bn2.weight:[798]***blocks.5.2.bn2.bias:[798]***blocks.5.2.bn2.running_mean:[798]***blocks.5.2.bn2.running_var:[798]***blocks.5.2.bn2.num_batches_tracked:[]***blocks.5.2.se.conv_reduce.weight:[48, 798, 1, 1]***blocks.5.2.se.conv_reduce.bias:[48]***blocks.5.2.se.conv_expand.weight:[798, 48, 1, 1]***blocks.5.2.se.conv_expand.bias:[798]***blocks.5.2.conv_pwl.weight:[192, 798, 1, 1]***blocks.5.2.bn3.weight:[192]***blocks.5.2.bn3.bias:[192]***blocks.5.2.bn3.running_mean:[192]***blocks.5.2.bn3.running_var:[192]***blocks.5.2.bn3.num_batches_tracked:[]***blocks.5.3.conv_pw.weight:[891, 192, 1, 1]***blocks.5.3.bn1.weight:[891]***blocks.5.3.bn1.bias:[891]***blocks.5.3.bn1.running_mean:[891]***blocks.5.3.bn1.running_var:[891]***blocks.5.3.bn1.num_batches_tracked:[]***blocks.5.3.conv_dw.weight:[891, 1, 5, 5]***blocks.5.3.bn2.weight:[891]***blocks.5.3.bn2.bias:[891]***blocks.5.3.bn2.running_mean:[891]***blocks.5.3.bn2.running_var:[891]***blocks.5.3.bn2.num_batches_tracked:[]***blocks.5.3.se.conv_reduce.weight:[48, 891, 1, 1]***blocks.5.3.se.conv_reduce.bias:[48]***blocks.5.3.se.conv_expand.weight:[891, 48, 1, 1]***blocks.5.3.se.conv_expand.bias:[891]***blocks.5.3.conv_pwl.weight:[192, 891, 1, 1]***blocks.5.3.bn3.weight:[192]***blocks.5.3.bn3.bias:[192]***blocks.5.3.bn3.running_mean:[192]***blocks.5.3.bn3.running_var:[192]***blocks.5.3.bn3.num_batches_tracked:[]***blocks.5.4.conv_pw.weight:[990, 192, 1, 1]***blocks.5.4.bn1.weight:[990]***blocks.5.4.bn1.bias:[990]***blocks.5.4.bn1.running_mean:[990]***blocks.5.4.bn1.running_var:[990]***blocks.5.4.bn1.num_batches_tracked:[]***blocks.5.4.conv_dw.weight:[990, 1, 5, 5]***blocks.5.4.bn2.weight:[990]***blocks.5.4.bn2.bias:[990]***blocks.5.4.bn2.running_mean:[990]***blocks.5.4.bn2.running_var:[990]***blocks.5.4.bn2.num_batches_tracked:[]***blocks.5.4.se.conv_reduce.weight:[48, 990, 1, 1]***blocks.5.4.se.conv_reduce.bias:[48]***blocks.5.4.se.conv_expand.weight:[990, 48, 1, 1]***blocks.5.4.se.conv_expand.bias:[990]***blocks.5.4.conv_pwl.weight:[192, 990, 1, 1]***blocks.5.4.bn3.weight:[192]***blocks.5.4.bn3.bias:[192]***blocks.5.4.bn3.running_mean:[192]***blocks.5.4.bn3.running_var:[192]***blocks.5.4.bn3.num_batches_tracked:[]***blocks.6.0.conv_pw.weight:[1152, 192, 1, 1]***blocks.6.0.bn1.weight:[1152]***blocks.6.0.bn1.bias:[1152]***blocks.6.0.bn1.running_mean:[1152]***blocks.6.0.bn1.running_var:[1152]***blocks.6.0.bn1.num_batches_tracked:[]***blocks.6.0.conv_dw.weight:[1152, 1, 3, 3]***blocks.6.0.bn2.weight:[1152]***blocks.6.0.bn2.bias:[1152]***blocks.6.0.bn2.running_mean:[1152]***blocks.6.0.bn2.running_var:[1152]***blocks.6.0.bn2.num_batches_tracked:[]***blocks.6.0.se.conv_reduce.weight:[48, 1152, 1, 1]***blocks.6.0.se.conv_reduce.bias:[48]***blocks.6.0.se.conv_expand.weight:[1152, 48, 1, 1]***blocks.6.0.se.conv_expand.bias:[1152]***blocks.6.0.conv_pwl.weight:[320, 1152, 1, 
1]***blocks.6.0.bn3.weight:[320]***blocks.6.0.bn3.bias:[320]***blocks.6.0.bn3.running_mean:[320]***blocks.6.0.bn3.running_var:[320]***blocks.6.0.bn3.num_batches_tracked:[]***blocks.6.1.conv_pw.weight:[1912, 320, 1, 1]***blocks.6.1.bn1.weight:[1912]***blocks.6.1.bn1.bias:[1912]***blocks.6.1.bn1.running_mean:[1912]***blocks.6.1.bn1.running_var:[1912]***blocks.6.1.bn1.num_batches_tracked:[]***blocks.6.1.conv_dw.weight:[1912, 1, 3, 3]***blocks.6.1.bn2.weight:[1912]***blocks.6.1.bn2.bias:[1912]***blocks.6.1.bn2.running_mean:[1912]***blocks.6.1.bn2.running_var:[1912]***blocks.6.1.bn2.num_batches_tracked:[]***blocks.6.1.se.conv_reduce.weight:[80, 1912, 1, 1]***blocks.6.1.se.conv_reduce.bias:[80]***blocks.6.1.se.conv_expand.weight:[1912, 80, 1, 1]***blocks.6.1.se.conv_expand.bias:[1912]***blocks.6.1.conv_pwl.weight:[320, 1912, 1, 1]***blocks.6.1.bn3.weight:[320]***blocks.6.1.bn3.bias:[320]***blocks.6.1.bn3.running_mean:[320]***blocks.6.1.bn3.running_var:[320]***blocks.6.1.bn3.num_batches_tracked:[]***conv_head.weight:[1280, 320, 1, 1]***bn2.weight:[1280]***bn2.bias:[1280]***bn2.running_mean:[1280]***bn2.running_var:[1280]***bn2.num_batches_tracked:[]***classifier.weight:[1000, 1280]***classifier.bias:[1000]
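The single line above is the '***'-separated "name:[shape]" listing consumed by adapt_model_from_string / adapt_model_from_file in _prune.py. A minimal parsing sketch (not part of this commit; parse_pruned_spec is a made-up helper) showing how the entries map to tensor shapes:

    # Sketch only: mirror the parsing done at the top of adapt_model_from_string().
    def parse_pruned_spec(model_string: str) -> dict:
        shapes = {}
        for entry in model_string.split('***'):
            name, shape = entry.split(':')
            dims = shape[1:-1].split(',')       # strip the surrounding [ ]
            if dims[0] != '':                   # scalar entries like num_batches_tracked are skipped
                shapes[name] = [int(d) for d in dims]
        return shapes

    spec = parse_pruned_spec('conv_stem.weight:[32, 3, 3, 3]***bn1.weight:[32]***bn1.num_batches_tracked:[]')
    assert spec['conv_stem.weight'] == [32, 3, 3, 3]
    assert 'bn1.num_batches_tracked' not in spec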
pytorch-image-models/timm/models/focalnet.py ADDED
@@ -0,0 +1,652 @@
1
+ """ FocalNet
2
+
3
+ As described in `Focal Modulation Networks` - https://arxiv.org/abs/2203.11926
4
+
5
+ Significant modifications and refactoring from the original impl at https://github.com/microsoft/FocalNet
6
+
7
+ This impl is/has:
8
+ * fully convolutional, NCHW tensor layout throughout, seemed to have minimal performance impact but more flexible
9
+ * re-ordered downsample / layer so that striding always at beginning of layer (stage)
10
+ * no input size constraints or input resolution/H/W tracking through the model
11
+ * torchscript fixed and a number of quirks cleaned up
12
+ * feature extraction support via `features_only=True`
13
+ """
14
+ # --------------------------------------------------------
15
+ # FocalNets -- Focal Modulation Networks
16
+ # Copyright (c) 2022 Microsoft
17
+ # Licensed under The MIT License [see LICENSE for details]
18
+ # Written by Jianwei Yang (jianwyan@microsoft.com)
19
+ # --------------------------------------------------------
20
+ from functools import partial
21
+ from typing import Callable, Optional, Tuple
22
+
23
+ import torch
24
+ import torch.nn as nn
25
+ import torch.utils.checkpoint as checkpoint
26
+
27
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
28
+ from timm.layers import Mlp, DropPath, LayerNorm2d, trunc_normal_, ClassifierHead, NormMlpClassifierHead
29
+ from ._builder import build_model_with_cfg
30
+ from ._manipulate import named_apply
31
+ from ._registry import generate_default_cfgs, register_model
32
+
33
+ __all__ = ['FocalNet']
34
+
35
+
36
+ class FocalModulation(nn.Module):
37
+ def __init__(
38
+ self,
39
+ dim: int,
40
+ focal_window,
41
+ focal_level: int,
42
+ focal_factor: int = 2,
43
+ bias: bool = True,
44
+ use_post_norm: bool = False,
45
+ normalize_modulator: bool = False,
46
+ proj_drop: float = 0.,
47
+ norm_layer: Callable = LayerNorm2d,
48
+ ):
49
+ super().__init__()
50
+
51
+ self.dim = dim
52
+ self.focal_window = focal_window
53
+ self.focal_level = focal_level
54
+ self.focal_factor = focal_factor
55
+ self.use_post_norm = use_post_norm
56
+ self.normalize_modulator = normalize_modulator
57
+ self.input_split = [dim, dim, self.focal_level + 1]
58
+
59
+ self.f = nn.Conv2d(dim, 2 * dim + (self.focal_level + 1), kernel_size=1, bias=bias)
60
+ self.h = nn.Conv2d(dim, dim, kernel_size=1, bias=bias)
61
+
62
+ self.act = nn.GELU()
63
+ self.proj = nn.Conv2d(dim, dim, kernel_size=1)
64
+ self.proj_drop = nn.Dropout(proj_drop)
65
+ self.focal_layers = nn.ModuleList()
66
+
67
+ self.kernel_sizes = []
68
+ for k in range(self.focal_level):
69
+ kernel_size = self.focal_factor * k + self.focal_window
70
+ self.focal_layers.append(nn.Sequential(
71
+ nn.Conv2d(dim, dim, kernel_size=kernel_size, groups=dim, padding=kernel_size // 2, bias=False),
72
+ nn.GELU(),
73
+ ))
74
+ self.kernel_sizes.append(kernel_size)
75
+ self.norm = norm_layer(dim) if self.use_post_norm else nn.Identity()
76
+
77
+ def forward(self, x):
78
+ # pre linear projection
79
+ x = self.f(x)
80
+ q, ctx, gates = torch.split(x, self.input_split, 1)
81
+
82
+ # context aggregation
83
+ ctx_all = 0
84
+ for l, focal_layer in enumerate(self.focal_layers):
85
+ ctx = focal_layer(ctx)
86
+ ctx_all = ctx_all + ctx * gates[:, l:l + 1]
87
+ ctx_global = self.act(ctx.mean((2, 3), keepdim=True))
88
+ ctx_all = ctx_all + ctx_global * gates[:, self.focal_level:]
89
+
90
+ # normalize context
91
+ if self.normalize_modulator:
92
+ ctx_all = ctx_all / (self.focal_level + 1)
93
+
94
+ # focal modulation
95
+ x_out = q * self.h(ctx_all)
96
+ x_out = self.norm(x_out)
97
+
98
+ # post linear projection
99
+ x_out = self.proj(x_out)
100
+ x_out = self.proj_drop(x_out)
101
+ return x_out
102
+
103
+
104
+ class LayerScale2d(nn.Module):
105
+ def __init__(self, dim, init_values=1e-5, inplace=False):
106
+ super().__init__()
107
+ self.inplace = inplace
108
+ self.gamma = nn.Parameter(init_values * torch.ones(dim))
109
+
110
+ def forward(self, x):
111
+ gamma = self.gamma.view(1, -1, 1, 1)
112
+ return x.mul_(gamma) if self.inplace else x * gamma
113
+
114
+
115
+ class FocalNetBlock(nn.Module):
116
+ """ Focal Modulation Network Block.
117
+ """
118
+
119
+ def __init__(
120
+ self,
121
+ dim: int,
122
+ mlp_ratio: float = 4.,
123
+ focal_level: int = 1,
124
+ focal_window: int = 3,
125
+ use_post_norm: bool = False,
126
+ use_post_norm_in_modulation: bool = False,
127
+ normalize_modulator: bool = False,
128
+ layerscale_value: float = 1e-4,
129
+ proj_drop: float = 0.,
130
+ drop_path: float = 0.,
131
+ act_layer: Callable = nn.GELU,
132
+ norm_layer: Callable = LayerNorm2d,
133
+ ):
134
+ """
135
+ Args:
136
+ dim: Number of input channels.
137
+ mlp_ratio: Ratio of mlp hidden dim to embedding dim.
138
+ focal_level: Number of focal levels.
139
+ focal_window: Focal window size at first focal level.
140
+ use_post_norm: Whether to use layer norm after modulation.
141
+ use_post_norm_in_modulation: Whether to use layer norm in modulation.
142
+ layerscale_value: Initial layerscale value.
143
+ proj_drop: Dropout rate.
144
+ drop_path: Stochastic depth rate.
145
+ act_layer: Activation layer.
146
+ norm_layer: Normalization layer.
147
+ """
148
+ super().__init__()
149
+ self.dim = dim
150
+ self.mlp_ratio = mlp_ratio
151
+
152
+ self.focal_window = focal_window
153
+ self.focal_level = focal_level
154
+ self.use_post_norm = use_post_norm
155
+
156
+ self.norm1 = norm_layer(dim) if not use_post_norm else nn.Identity()
157
+ self.modulation = FocalModulation(
158
+ dim,
159
+ focal_window=focal_window,
160
+ focal_level=self.focal_level,
161
+ use_post_norm=use_post_norm_in_modulation,
162
+ normalize_modulator=normalize_modulator,
163
+ proj_drop=proj_drop,
164
+ norm_layer=norm_layer,
165
+ )
166
+ self.norm1_post = norm_layer(dim) if use_post_norm else nn.Identity()
167
+ self.ls1 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity()
168
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
169
+
170
+ self.norm2 = norm_layer(dim) if not use_post_norm else nn.Identity()
171
+ self.mlp = Mlp(
172
+ in_features=dim,
173
+ hidden_features=int(dim * mlp_ratio),
174
+ act_layer=act_layer,
175
+ drop=proj_drop,
176
+ use_conv=True,
177
+ )
178
+ self.norm2_post = norm_layer(dim) if use_post_norm else nn.Identity()
179
+ self.ls2 = LayerScale2d(dim, layerscale_value) if layerscale_value is not None else nn.Identity()
180
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
181
+
182
+ def forward(self, x):
183
+ shortcut = x
184
+
185
+ # Focal Modulation
186
+ x = self.norm1(x)
187
+ x = self.modulation(x)
188
+ x = self.norm1_post(x)
189
+ x = shortcut + self.drop_path1(self.ls1(x))
190
+
191
+ # FFN
192
+ x = x + self.drop_path2(self.ls2(self.norm2_post(self.mlp(self.norm2(x)))))
193
+
194
+ return x
195
+
196
+
197
+ class FocalNetStage(nn.Module):
198
+ """ A basic Focal Transformer layer for one stage.
199
+ """
200
+
201
+ def __init__(
202
+ self,
203
+ dim: int,
204
+ out_dim: int,
205
+ depth: int,
206
+ mlp_ratio: float = 4.,
207
+ downsample: bool = True,
208
+ focal_level: int = 1,
209
+ focal_window: int = 1,
210
+ use_overlap_down: bool = False,
211
+ use_post_norm: bool = False,
212
+ use_post_norm_in_modulation: bool = False,
213
+ normalize_modulator: bool = False,
214
+ layerscale_value: float = 1e-4,
215
+ proj_drop: float = 0.,
216
+ drop_path: float = 0.,
217
+ norm_layer: Callable = LayerNorm2d,
218
+ ):
219
+ """
220
+ Args:
221
+ dim: Number of input channels.
222
+ out_dim: Number of output channels.
223
+ depth: Number of blocks.
224
+ mlp_ratio: Ratio of mlp hidden dim to embedding dim.
225
+ downsample: Downsample layer at start of the layer.
226
+ focal_level: Number of focal levels
227
+ focal_window: Focal window size at first focal level
228
+ use_overlap_down: Use overlapped convolution in downsample layer.
229
+ use_post_norm: Whether to use layer norm after modulation.
230
+ use_post_norm_in_modulation: Whether to use layer norm in modulation.
231
+ layerscale_value: Initial layerscale value
232
+ proj_drop: Dropout rate for projections.
233
+ drop_path: Stochastic depth rate.
234
+ norm_layer: Normalization layer.
235
+ """
236
+ super().__init__()
237
+ self.dim = dim
238
+ self.depth = depth
239
+ self.grad_checkpointing = False
240
+
241
+ if downsample:
242
+ self.downsample = Downsample(
243
+ in_chs=dim,
244
+ out_chs=out_dim,
245
+ stride=2,
246
+ overlap=use_overlap_down,
247
+ norm_layer=norm_layer,
248
+ )
249
+ else:
250
+ self.downsample = nn.Identity()
251
+
252
+ # build blocks
253
+ self.blocks = nn.ModuleList([
254
+ FocalNetBlock(
255
+ dim=out_dim,
256
+ mlp_ratio=mlp_ratio,
257
+ focal_level=focal_level,
258
+ focal_window=focal_window,
259
+ use_post_norm=use_post_norm,
260
+ use_post_norm_in_modulation=use_post_norm_in_modulation,
261
+ normalize_modulator=normalize_modulator,
262
+ layerscale_value=layerscale_value,
263
+ proj_drop=proj_drop,
264
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
265
+ norm_layer=norm_layer,
266
+ )
267
+ for i in range(depth)])
268
+
269
+ @torch.jit.ignore
270
+ def set_grad_checkpointing(self, enable=True):
271
+ self.grad_checkpointing = enable
272
+
273
+ def forward(self, x):
274
+ x = self.downsample(x)
275
+ for blk in self.blocks:
276
+ if self.grad_checkpointing and not torch.jit.is_scripting():
277
+ x = checkpoint.checkpoint(blk, x)
278
+ else:
279
+ x = blk(x)
280
+ return x
281
+
282
+
283
+ class Downsample(nn.Module):
284
+
285
+ def __init__(
286
+ self,
287
+ in_chs: int,
288
+ out_chs: int,
289
+ stride: int = 4,
290
+ overlap: bool = False,
291
+ norm_layer: Optional[Callable] = None,
292
+ ):
293
+ """
294
+
295
+ Args:
296
+ in_chs: Number of input image channels.
297
+ out_chs: Number of linear projection output channels.
298
+ stride: Downsample stride.
299
+ overlap: Use overlapping convolutions if True.
300
+ norm_layer: Normalization layer.
301
+ """
302
+ super().__init__()
303
+ self.stride = stride
304
+ padding = 0
305
+ kernel_size = stride
306
+ if overlap:
307
+ assert stride in (2, 4)
308
+ if stride == 4:
309
+ kernel_size, padding = 7, 2
310
+ elif stride == 2:
311
+ kernel_size, padding = 3, 1
312
+ self.proj = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding)
313
+ self.norm = norm_layer(out_chs) if norm_layer is not None else nn.Identity()
314
+
315
+ def forward(self, x):
316
+ x = self.proj(x)
317
+ x = self.norm(x)
318
+ return x
319
+
320
+
321
+ class FocalNet(nn.Module):
322
+ """ Focal Modulation Networks (FocalNets)
323
+ """
324
+
325
+ def __init__(
326
+ self,
327
+ in_chans: int = 3,
328
+ num_classes: int = 1000,
329
+ global_pool: str = 'avg',
330
+ embed_dim: int = 96,
331
+ depths: Tuple[int, ...] = (2, 2, 6, 2),
332
+ mlp_ratio: float = 4.,
333
+ focal_levels: Tuple[int, ...] = (2, 2, 2, 2),
334
+ focal_windows: Tuple[int, ...] = (3, 3, 3, 3),
335
+ use_overlap_down: bool = False,
336
+ use_post_norm: bool = False,
337
+ use_post_norm_in_modulation: bool = False,
338
+ normalize_modulator: bool = False,
339
+ head_hidden_size: Optional[int] = None,
340
+ head_init_scale: float = 1.0,
341
+ layerscale_value: Optional[float] = None,
342
+ drop_rate: float = 0.,
343
+ proj_drop_rate: float = 0.,
344
+ drop_path_rate: float = 0.1,
345
+ norm_layer: Callable = partial(LayerNorm2d, eps=1e-5),
346
+ ):
347
+ """
348
+ Args:
349
+ in_chans: Number of input image channels.
350
+ num_classes: Number of classes for classification head.
351
+ embed_dim: Patch embedding dimension.
352
+ depths: Depth of each Focal Transformer layer.
353
+ mlp_ratio: Ratio of mlp hidden dim to embedding dim.
354
+ focal_levels: How many focal levels at all stages. Note that this excludes the finest-grain level.
355
+ focal_windows: The focal window size at all stages.
356
+ use_overlap_down: Whether to use convolutional embedding.
357
+ use_post_norm: Whether to use layernorm after modulation (it helps stabilize training of large models)
358
+ layerscale_value: Value for layer scale.
359
+ drop_rate: Dropout rate.
360
+ drop_path_rate: Stochastic depth rate.
361
+ norm_layer: Normalization layer.
362
+ """
363
+ super().__init__()
364
+
365
+ self.num_layers = len(depths)
366
+ embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
367
+
368
+ self.num_classes = num_classes
369
+ self.embed_dim = embed_dim
370
+ self.num_features = self.head_hidden_size = embed_dim[-1]
371
+ self.feature_info = []
372
+
373
+ self.stem = Downsample(
374
+ in_chs=in_chans,
375
+ out_chs=embed_dim[0],
376
+ overlap=use_overlap_down,
377
+ norm_layer=norm_layer,
378
+ )
379
+ in_dim = embed_dim[0]
380
+
381
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
382
+ layers = []
383
+ for i_layer in range(self.num_layers):
384
+ out_dim = embed_dim[i_layer]
385
+ layer = FocalNetStage(
386
+ dim=in_dim,
387
+ out_dim=out_dim,
388
+ depth=depths[i_layer],
389
+ mlp_ratio=mlp_ratio,
390
+ downsample=i_layer > 0,
391
+ focal_level=focal_levels[i_layer],
392
+ focal_window=focal_windows[i_layer],
393
+ use_overlap_down=use_overlap_down,
394
+ use_post_norm=use_post_norm,
395
+ use_post_norm_in_modulation=use_post_norm_in_modulation,
396
+ normalize_modulator=normalize_modulator,
397
+ layerscale_value=layerscale_value,
398
+ proj_drop=proj_drop_rate,
399
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
400
+ norm_layer=norm_layer,
401
+ )
402
+ in_dim = out_dim
403
+ layers += [layer]
404
+ self.feature_info += [dict(num_chs=out_dim, reduction=4 * 2 ** i_layer, module=f'layers.{i_layer}')]
405
+
406
+ self.layers = nn.Sequential(*layers)
407
+
408
+ if head_hidden_size:
409
+ self.norm = nn.Identity()
410
+ self.head_hidden_size = head_hidden_size
411
+ self.head = NormMlpClassifierHead(
412
+ self.num_features,
413
+ num_classes,
414
+ hidden_size=head_hidden_size,
415
+ pool_type=global_pool,
416
+ drop_rate=drop_rate,
417
+ norm_layer=norm_layer,
418
+ )
419
+ else:
420
+ self.norm = norm_layer(self.num_features)
421
+ self.head = ClassifierHead(
422
+ self.num_features,
423
+ num_classes,
424
+ pool_type=global_pool,
425
+ drop_rate=drop_rate
426
+ )
427
+
428
+ named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
429
+
430
+ @torch.jit.ignore
431
+ def no_weight_decay(self):
432
+ return {''}
433
+
434
+ @torch.jit.ignore
435
+ def group_matcher(self, coarse=False):
436
+ return dict(
437
+ stem=r'^stem',
438
+ blocks=[
439
+ (r'^layers\.(\d+)', None),
440
+ (r'^norm', (99999,))
441
+ ] if coarse else [
442
+ (r'^layers\.(\d+).downsample', (0,)),
443
+ (r'^layers\.(\d+)\.\w+\.(\d+)', None),
444
+ (r'^norm', (99999,)),
445
+ ]
446
+ )
447
+
448
+ @torch.jit.ignore
449
+ def set_grad_checkpointing(self, enable=True):
450
+ self.grad_checkpointing = enable
451
+ for l in self.layers:
452
+ l.set_grad_checkpointing(enable=enable)
453
+
454
+ @torch.jit.ignore
455
+ def get_classifier(self) -> nn.Module:
456
+ return self.head.fc
457
+
458
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
459
+ self.head.reset(num_classes, pool_type=global_pool)
460
+
461
+ def forward_features(self, x):
462
+ x = self.stem(x)
463
+ x = self.layers(x)
464
+ x = self.norm(x)
465
+ return x
466
+
467
+ def forward_head(self, x, pre_logits: bool = False):
468
+ return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
469
+
470
+ def forward(self, x):
471
+ x = self.forward_features(x)
472
+ x = self.forward_head(x)
473
+ return x
474
+
475
+
476
+ def _init_weights(module, name=None, head_init_scale=1.0):
477
+ if isinstance(module, nn.Conv2d):
478
+ trunc_normal_(module.weight, std=.02)
479
+ if module.bias is not None:
480
+ nn.init.zeros_(module.bias)
481
+ elif isinstance(module, nn.Linear):
482
+ trunc_normal_(module.weight, std=.02)
483
+ if module.bias is not None:
484
+ nn.init.zeros_(module.bias)
485
+ if name and 'head.fc' in name:
486
+ module.weight.data.mul_(head_init_scale)
487
+ module.bias.data.mul_(head_init_scale)
488
+
489
+
490
+ def _cfg(url='', **kwargs):
491
+ return {
492
+ 'url': url,
493
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
494
+ 'crop_pct': .9, 'interpolation': 'bicubic',
495
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
496
+ 'first_conv': 'stem.proj', 'classifier': 'head.fc',
497
+ 'license': 'mit', **kwargs
498
+ }
499
+
500
+
501
+ default_cfgs = generate_default_cfgs({
502
+ "focalnet_tiny_srf.ms_in1k": _cfg(
503
+ hf_hub_id='timm/'),
504
+ "focalnet_small_srf.ms_in1k": _cfg(
505
+ hf_hub_id='timm/'),
506
+ "focalnet_base_srf.ms_in1k": _cfg(
507
+ hf_hub_id='timm/'),
508
+ "focalnet_tiny_lrf.ms_in1k": _cfg(
509
+ hf_hub_id='timm/'),
510
+ "focalnet_small_lrf.ms_in1k": _cfg(
511
+ hf_hub_id='timm/'),
512
+ "focalnet_base_lrf.ms_in1k": _cfg(
513
+ hf_hub_id='timm/'),
514
+
515
+ "focalnet_large_fl3.ms_in22k": _cfg(
516
+ hf_hub_id='timm/',
517
+ input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
518
+ "focalnet_large_fl4.ms_in22k": _cfg(
519
+ hf_hub_id='timm/',
520
+ input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
521
+ "focalnet_xlarge_fl3.ms_in22k": _cfg(
522
+ hf_hub_id='timm/',
523
+ input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
524
+ "focalnet_xlarge_fl4.ms_in22k": _cfg(
525
+ hf_hub_id='timm/',
526
+ input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, num_classes=21842),
527
+ "focalnet_huge_fl3.ms_in22k": _cfg(
528
+ hf_hub_id='timm/',
529
+ num_classes=21842),
530
+ "focalnet_huge_fl4.ms_in22k": _cfg(
531
+ hf_hub_id='timm/',
532
+ num_classes=0),
533
+ })
534
+
535
+
536
+ def checkpoint_filter_fn(state_dict, model: FocalNet):
537
+ state_dict = state_dict.get('model', state_dict)
538
+ if 'stem.proj.weight' in state_dict:
539
+ return state_dict
540
+ import re
541
+ out_dict = {}
542
+ dest_dict = model.state_dict()
543
+ for k, v in state_dict.items():
544
+ k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k)
545
+ k = k.replace('patch_embed', 'stem')
546
+ k = re.sub(r'layers.(\d+).downsample', lambda x: f'layers.{int(x.group(1)) + 1}.downsample', k)
547
+ if 'norm' in k and k not in dest_dict:
548
+ k = re.sub(r'norm([0-9])', r'norm\1_post', k)
549
+ k = k.replace('ln.', 'norm.')
550
+ k = k.replace('head', 'head.fc')
551
+ if k in dest_dict and dest_dict[k].numel() == v.numel() and dest_dict[k].shape != v.shape:
552
+ v = v.reshape(dest_dict[k].shape)
553
+ out_dict[k] = v
554
+ return out_dict
555
+
556
+
557
+ def _create_focalnet(variant, pretrained=False, **kwargs):
558
+ default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1))))
559
+ out_indices = kwargs.pop('out_indices', default_out_indices)
560
+
561
+ model = build_model_with_cfg(
562
+ FocalNet, variant, pretrained,
563
+ pretrained_filter_fn=checkpoint_filter_fn,
564
+ feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
565
+ **kwargs)
566
+ return model
567
+
568
+
569
+ @register_model
570
+ def focalnet_tiny_srf(pretrained=False, **kwargs) -> FocalNet:
571
+ model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, **kwargs)
572
+ return _create_focalnet('focalnet_tiny_srf', pretrained=pretrained, **model_kwargs)
573
+
574
+
575
+ @register_model
576
+ def focalnet_small_srf(pretrained=False, **kwargs) -> FocalNet:
577
+ model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, **kwargs)
578
+ return _create_focalnet('focalnet_small_srf', pretrained=pretrained, **model_kwargs)
579
+
580
+
581
+ @register_model
582
+ def focalnet_base_srf(pretrained=False, **kwargs) -> FocalNet:
583
+ model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, **kwargs)
584
+ return _create_focalnet('focalnet_base_srf', pretrained=pretrained, **model_kwargs)
585
+
586
+
587
+ @register_model
588
+ def focalnet_tiny_lrf(pretrained=False, **kwargs) -> FocalNet:
589
+ model_kwargs = dict(depths=[2, 2, 6, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs)
590
+ return _create_focalnet('focalnet_tiny_lrf', pretrained=pretrained, **model_kwargs)
591
+
592
+
593
+ @register_model
594
+ def focalnet_small_lrf(pretrained=False, **kwargs) -> FocalNet:
595
+ model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=96, focal_levels=[3, 3, 3, 3], **kwargs)
596
+ return _create_focalnet('focalnet_small_lrf', pretrained=pretrained, **model_kwargs)
597
+
598
+
599
+ @register_model
600
+ def focalnet_base_lrf(pretrained=False, **kwargs) -> FocalNet:
601
+ model_kwargs = dict(depths=[2, 2, 18, 2], embed_dim=128, focal_levels=[3, 3, 3, 3], **kwargs)
602
+ return _create_focalnet('focalnet_base_lrf', pretrained=pretrained, **model_kwargs)
603
+
604
+
605
+ # FocalNet large+ models
606
+ @register_model
607
+ def focalnet_large_fl3(pretrained=False, **kwargs) -> FocalNet:
608
+ model_kwargs = dict(
609
+ depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4,
610
+ use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
611
+ return _create_focalnet('focalnet_large_fl3', pretrained=pretrained, **model_kwargs)
612
+
613
+
614
+ @register_model
615
+ def focalnet_large_fl4(pretrained=False, **kwargs) -> FocalNet:
616
+ model_kwargs = dict(
617
+ depths=[2, 2, 18, 2], embed_dim=192, focal_levels=[4, 4, 4, 4],
618
+ use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
619
+ return _create_focalnet('focalnet_large_fl4', pretrained=pretrained, **model_kwargs)
620
+
621
+
622
+ @register_model
623
+ def focalnet_xlarge_fl3(pretrained=False, **kwargs) -> FocalNet:
624
+ model_kwargs = dict(
625
+ depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[3, 3, 3, 3], focal_windows=[5] * 4,
626
+ use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
627
+ return _create_focalnet('focalnet_xlarge_fl3', pretrained=pretrained, **model_kwargs)
628
+
629
+
630
+ @register_model
631
+ def focalnet_xlarge_fl4(pretrained=False, **kwargs) -> FocalNet:
632
+ model_kwargs = dict(
633
+ depths=[2, 2, 18, 2], embed_dim=256, focal_levels=[4, 4, 4, 4],
634
+ use_post_norm=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
635
+ return _create_focalnet('focalnet_xlarge_fl4', pretrained=pretrained, **model_kwargs)
636
+
637
+
638
+ @register_model
639
+ def focalnet_huge_fl3(pretrained=False, **kwargs) -> FocalNet:
640
+ model_kwargs = dict(
641
+ depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[3, 3, 3, 3], focal_windows=[3] * 4,
642
+ use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
643
+ return _create_focalnet('focalnet_huge_fl3', pretrained=pretrained, **model_kwargs)
644
+
645
+
646
+ @register_model
647
+ def focalnet_huge_fl4(pretrained=False, **kwargs) -> FocalNet:
648
+ model_kwargs = dict(
649
+ depths=[2, 2, 18, 2], embed_dim=352, focal_levels=[4, 4, 4, 4],
650
+ use_post_norm=True, use_post_norm_in_modulation=True, use_overlap_down=True, layerscale_value=1e-4, **kwargs)
651
+ return _create_focalnet('focalnet_huge_fl4', pretrained=pretrained, **model_kwargs)
652
+
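The module docstring at the top of focalnet.py notes feature-extraction support via features_only=True; a minimal usage sketch (not part of this commit) under that assumption, using one of the variants registered above:

    # Sketch only: classification and multi-scale feature extraction with a FocalNet variant.
    import torch
    import timm

    model = timm.create_model('focalnet_tiny_srf', pretrained=False)
    logits = model(torch.randn(1, 3, 224, 224))          # (1, 1000)

    feat_model = timm.create_model('focalnet_tiny_srf', pretrained=False, features_only=True)
    feats = feat_model(torch.randn(1, 3, 224, 224))      # list of NCHW maps, one per stage
    print(feat_model.feature_info.channels())            # expected: [96, 192, 384, 768]
    print(feat_model.feature_info.reduction())           # expected: [4, 8, 16, 32]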
pytorch-image-models/timm/models/fx_features.py ADDED
@@ -0,0 +1,4 @@
1
+ from ._features_fx import *
2
+
3
+ import warnings
4
+ warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", FutureWarning)
pytorch-image-models/timm/models/gcvit.py ADDED
@@ -0,0 +1,592 @@
1
+ """ Global Context ViT
2
+
3
+ From scratch implementation of GCViT in the style of timm swin_transformer_v2_cr.py
4
+
5
+ Global Context Vision Transformers -https://arxiv.org/abs/2206.09959
6
+
7
+ @article{hatamizadeh2022global,
8
+ title={Global Context Vision Transformers},
9
+ author={Hatamizadeh, Ali and Yin, Hongxu and Kautz, Jan and Molchanov, Pavlo},
10
+ journal={arXiv preprint arXiv:2206.09959},
11
+ year={2022}
12
+ }
13
+
14
+ Free of any code related to NVIDIA GCVit impl at https://github.com/NVlabs/GCVit.
15
+ The license for this code release is Apache 2.0 with no commercial restrictions.
16
+
17
+ However, weight files adapted from NVIDIA GCVit impl ARE under a non-commercial share-alike license
18
+ (https://creativecommons.org/licenses/by-nc-sa/4.0/) until I have a chance to train new ones...
19
+
20
+ Hacked together by / Copyright 2022, Ross Wightman
21
+ """
22
+ import math
23
+ from functools import partial
24
+ from typing import Callable, List, Optional, Tuple, Union
25
+
26
+ import torch
27
+ import torch.nn as nn
28
+ import torch.utils.checkpoint as checkpoint
29
+
30
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
31
+ from timm.layers import DropPath, to_2tuple, to_ntuple, Mlp, ClassifierHead, LayerNorm2d, \
32
+ get_attn, get_act_layer, get_norm_layer, RelPosBias, _assert
33
+ from ._builder import build_model_with_cfg
34
+ from ._features_fx import register_notrace_function
35
+ from ._manipulate import named_apply
36
+ from ._registry import register_model, generate_default_cfgs
37
+
38
+ __all__ = ['GlobalContextVit']
39
+
40
+
41
+ class MbConvBlock(nn.Module):
42
+ """ A depthwise separable / fused mbconv style residual block with SE, no norm.
43
+ """
44
+ def __init__(
45
+ self,
46
+ in_chs,
47
+ out_chs=None,
48
+ expand_ratio=1.0,
49
+ attn_layer='se',
50
+ bias=False,
51
+ act_layer=nn.GELU,
52
+ ):
53
+ super().__init__()
54
+ attn_kwargs = dict(act_layer=act_layer)
55
+ if isinstance(attn_layer, str) and attn_layer in ('se', 'eca'):
56
+ attn_kwargs['rd_ratio'] = 0.25
57
+ attn_kwargs['bias'] = False
58
+ attn_layer = get_attn(attn_layer)
59
+ out_chs = out_chs or in_chs
60
+ mid_chs = int(expand_ratio * in_chs)
61
+
62
+ self.conv_dw = nn.Conv2d(in_chs, mid_chs, 3, 1, 1, groups=in_chs, bias=bias)
63
+ self.act = act_layer()
64
+ self.se = attn_layer(mid_chs, **attn_kwargs)
65
+ self.conv_pw = nn.Conv2d(mid_chs, out_chs, 1, 1, 0, bias=bias)
66
+
67
+ def forward(self, x):
68
+ shortcut = x
69
+ x = self.conv_dw(x)
70
+ x = self.act(x)
71
+ x = self.se(x)
72
+ x = self.conv_pw(x)
73
+ x = x + shortcut
74
+ return x
75
+
76
+
77
+ class Downsample2d(nn.Module):
78
+ def __init__(
79
+ self,
80
+ dim,
81
+ dim_out=None,
82
+ reduction='conv',
83
+ act_layer=nn.GELU,
84
+ norm_layer=LayerNorm2d, # NOTE in NCHW
85
+ ):
86
+ super().__init__()
87
+ dim_out = dim_out or dim
88
+
89
+ self.norm1 = norm_layer(dim) if norm_layer is not None else nn.Identity()
90
+ self.conv_block = MbConvBlock(dim, act_layer=act_layer)
91
+ assert reduction in ('conv', 'max', 'avg')
92
+ if reduction == 'conv':
93
+ self.reduction = nn.Conv2d(dim, dim_out, 3, 2, 1, bias=False)
94
+ elif reduction == 'max':
95
+ assert dim == dim_out
96
+ self.reduction = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
97
+ else:
98
+ assert dim == dim_out
99
+ self.reduction = nn.AvgPool2d(kernel_size=2)
100
+ self.norm2 = norm_layer(dim_out) if norm_layer is not None else nn.Identity()
101
+
102
+ def forward(self, x):
103
+ x = self.norm1(x)
104
+ x = self.conv_block(x)
105
+ x = self.reduction(x)
106
+ x = self.norm2(x)
107
+ return x
108
+
109
+
110
+ class FeatureBlock(nn.Module):
111
+ def __init__(
112
+ self,
113
+ dim,
114
+ levels=0,
115
+ reduction='max',
116
+ act_layer=nn.GELU,
117
+ ):
118
+ super().__init__()
119
+ reductions = levels
120
+ levels = max(1, levels)
121
+ if reduction == 'avg':
122
+ pool_fn = partial(nn.AvgPool2d, kernel_size=2)
123
+ else:
124
+ pool_fn = partial(nn.MaxPool2d, kernel_size=3, stride=2, padding=1)
125
+ self.blocks = nn.Sequential()
126
+ for i in range(levels):
127
+ self.blocks.add_module(f'conv{i+1}', MbConvBlock(dim, act_layer=act_layer))
128
+ if reductions:
129
+ self.blocks.add_module(f'pool{i+1}', pool_fn())
130
+ reductions -= 1
131
+
132
+ def forward(self, x):
133
+ return self.blocks(x)
134
+
135
+
136
+ class Stem(nn.Module):
137
+ def __init__(
138
+ self,
139
+ in_chs: int = 3,
140
+ out_chs: int = 96,
141
+ act_layer: Callable = nn.GELU,
142
+ norm_layer: Callable = LayerNorm2d, # NOTE stem in NCHW
143
+ ):
144
+ super().__init__()
145
+ self.conv1 = nn.Conv2d(in_chs, out_chs, kernel_size=3, stride=2, padding=1)
146
+ self.down = Downsample2d(out_chs, act_layer=act_layer, norm_layer=norm_layer)
147
+
148
+ def forward(self, x):
149
+ x = self.conv1(x)
150
+ x = self.down(x)
151
+ return x
152
+
153
+
154
+ class WindowAttentionGlobal(nn.Module):
155
+
156
+ def __init__(
157
+ self,
158
+ dim: int,
159
+ num_heads: int,
160
+ window_size: Tuple[int, int],
161
+ use_global: bool = True,
162
+ qkv_bias: bool = True,
163
+ attn_drop: float = 0.,
164
+ proj_drop: float = 0.,
165
+ ):
166
+ super().__init__()
167
+ window_size = to_2tuple(window_size)
168
+ self.window_size = window_size
169
+ self.num_heads = num_heads
170
+ self.head_dim = dim // num_heads
171
+ self.scale = self.head_dim ** -0.5
172
+ self.use_global = use_global
173
+
174
+ self.rel_pos = RelPosBias(window_size=window_size, num_heads=num_heads)
175
+ if self.use_global:
176
+ self.qkv = nn.Linear(dim, dim * 2, bias=qkv_bias)
177
+ else:
178
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
179
+ self.attn_drop = nn.Dropout(attn_drop)
180
+ self.proj = nn.Linear(dim, dim)
181
+ self.proj_drop = nn.Dropout(proj_drop)
182
+
183
+ def forward(self, x, q_global: Optional[torch.Tensor] = None):
184
+ B, N, C = x.shape
185
+ if self.use_global and q_global is not None:
186
+ _assert(x.shape[-1] == q_global.shape[-1], 'x and q_global seq lengths should be equal')
187
+
188
+ kv = self.qkv(x)
189
+ kv = kv.reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
190
+ k, v = kv.unbind(0)
191
+
192
+ q = q_global.repeat(B // q_global.shape[0], 1, 1, 1)
193
+ q = q.reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
194
+ else:
195
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
196
+ q, k, v = qkv.unbind(0)
197
+ q = q * self.scale
198
+
199
+ attn = q @ k.transpose(-2, -1).contiguous() # NOTE contiguous() fixes an odd jit bug in PyTorch 2.0
200
+ attn = self.rel_pos(attn)
201
+ attn = attn.softmax(dim=-1)
202
+ attn = self.attn_drop(attn)
203
+
204
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
205
+ x = self.proj(x)
206
+ x = self.proj_drop(x)
207
+ return x
208
+
209
+
210
+ def window_partition(x, window_size: Tuple[int, int]):
211
+ B, H, W, C = x.shape
212
+ x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
213
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
214
+ return windows
215
+
216
+
217
+ @register_notrace_function # reason: int argument is a Proxy
218
+ def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]):
219
+ H, W = img_size
220
+ C = windows.shape[-1]
221
+ x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
222
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
223
+ return x
224
+
225
+
226
+ class LayerScale(nn.Module):
227
+ def __init__(self, dim, init_values=1e-5, inplace=False):
228
+ super().__init__()
229
+ self.inplace = inplace
230
+ self.gamma = nn.Parameter(init_values * torch.ones(dim))
231
+
232
+ def forward(self, x):
233
+ return x.mul_(self.gamma) if self.inplace else x * self.gamma
234
+
235
+
236
+ class GlobalContextVitBlock(nn.Module):
237
+ def __init__(
238
+ self,
239
+ dim: int,
240
+ feat_size: Tuple[int, int],
241
+ num_heads: int,
242
+ window_size: int = 7,
243
+ mlp_ratio: float = 4.,
244
+ use_global: bool = True,
245
+ qkv_bias: bool = True,
246
+ layer_scale: Optional[float] = None,
247
+ proj_drop: float = 0.,
248
+ attn_drop: float = 0.,
249
+ drop_path: float = 0.,
250
+ attn_layer: Callable = WindowAttentionGlobal,
251
+ act_layer: Callable = nn.GELU,
252
+ norm_layer: Callable = nn.LayerNorm,
253
+ ):
254
+ super().__init__()
255
+ feat_size = to_2tuple(feat_size)
256
+ window_size = to_2tuple(window_size)
257
+ self.window_size = window_size
258
+ self.num_windows = int((feat_size[0] // window_size[0]) * (feat_size[1] // window_size[1]))
259
+
260
+ self.norm1 = norm_layer(dim)
261
+ self.attn = attn_layer(
262
+ dim,
263
+ num_heads=num_heads,
264
+ window_size=window_size,
265
+ use_global=use_global,
266
+ qkv_bias=qkv_bias,
267
+ attn_drop=attn_drop,
268
+ proj_drop=proj_drop,
269
+ )
270
+ self.ls1 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity()
271
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
272
+
273
+ self.norm2 = norm_layer(dim)
274
+ self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop)
275
+ self.ls2 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity()
276
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
277
+
278
+ def _window_attn(self, x, q_global: Optional[torch.Tensor] = None):
279
+ B, H, W, C = x.shape
280
+ x_win = window_partition(x, self.window_size)
281
+ x_win = x_win.view(-1, self.window_size[0] * self.window_size[1], C)
282
+ attn_win = self.attn(x_win, q_global)
283
+ x = window_reverse(attn_win, self.window_size, (H, W))
284
+ return x
285
+
286
+ def forward(self, x, q_global: Optional[torch.Tensor] = None):
287
+ x = x + self.drop_path1(self.ls1(self._window_attn(self.norm1(x), q_global)))
288
+ x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
289
+ return x
290
+
291
+
292
+ class GlobalContextVitStage(nn.Module):
293
+ def __init__(
294
+ self,
295
+ dim,
296
+ depth: int,
297
+ num_heads: int,
298
+ feat_size: Tuple[int, int],
299
+ window_size: Tuple[int, int],
300
+ downsample: bool = True,
301
+ global_norm: bool = False,
302
+ stage_norm: bool = False,
303
+ mlp_ratio: float = 4.,
304
+ qkv_bias: bool = True,
305
+ layer_scale: Optional[float] = None,
306
+ proj_drop: float = 0.,
307
+ attn_drop: float = 0.,
308
+ drop_path: Union[List[float], float] = 0.0,
309
+ act_layer: Callable = nn.GELU,
310
+ norm_layer: Callable = nn.LayerNorm,
311
+ norm_layer_cl: Callable = LayerNorm2d,
312
+ ):
313
+ super().__init__()
314
+ if downsample:
315
+ self.downsample = Downsample2d(
316
+ dim=dim,
317
+ dim_out=dim * 2,
318
+ norm_layer=norm_layer,
319
+ )
320
+ dim = dim * 2
321
+ feat_size = (feat_size[0] // 2, feat_size[1] // 2)
322
+ else:
323
+ self.downsample = nn.Identity()
324
+ self.feat_size = feat_size
325
+ window_size = to_2tuple(window_size)
326
+
327
+ feat_levels = int(math.log2(min(feat_size) / min(window_size)))
328
+ self.global_block = FeatureBlock(dim, feat_levels)
329
+ self.global_norm = norm_layer_cl(dim) if global_norm else nn.Identity()
330
+
331
+ self.blocks = nn.ModuleList([
332
+ GlobalContextVitBlock(
333
+ dim=dim,
334
+ num_heads=num_heads,
335
+ feat_size=feat_size,
336
+ window_size=window_size,
337
+ mlp_ratio=mlp_ratio,
338
+ qkv_bias=qkv_bias,
339
+ use_global=(i % 2 != 0),
340
+ layer_scale=layer_scale,
341
+ proj_drop=proj_drop,
342
+ attn_drop=attn_drop,
343
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
344
+ act_layer=act_layer,
345
+ norm_layer=norm_layer_cl,
346
+ )
347
+ for i in range(depth)
348
+ ])
349
+ self.norm = norm_layer_cl(dim) if stage_norm else nn.Identity()
350
+ self.dim = dim
351
+ self.feat_size = feat_size
352
+ self.grad_checkpointing = False
353
+
354
+ def forward(self, x):
355
+ # input NCHW, downsample & global block are 2d conv + pooling
356
+ x = self.downsample(x)
357
+ global_query = self.global_block(x)
358
+
359
+ # reshape NCHW --> NHWC for transformer blocks
360
+ x = x.permute(0, 2, 3, 1)
361
+ global_query = self.global_norm(global_query.permute(0, 2, 3, 1))
362
+ for blk in self.blocks:
363
+ if self.grad_checkpointing and not torch.jit.is_scripting():
364
+ x = checkpoint.checkpoint(blk, x)
365
+ else:
366
+ x = blk(x, global_query)
367
+ x = self.norm(x)
368
+ x = x.permute(0, 3, 1, 2).contiguous() # back to NCHW
369
+ return x
370
+
371
+
372
+ class GlobalContextVit(nn.Module):
373
+ def __init__(
374
+ self,
375
+ in_chans: int = 3,
376
+ num_classes: int = 1000,
377
+ global_pool: str = 'avg',
378
+ img_size: Tuple[int, int] = 224,
379
+ window_ratio: Tuple[int, ...] = (32, 32, 16, 32),
380
+ window_size: Optional[Tuple[int, ...]] = None,
381
+ embed_dim: int = 64,
382
+ depths: Tuple[int, ...] = (3, 4, 19, 5),
383
+ num_heads: Tuple[int, ...] = (2, 4, 8, 16),
384
+ mlp_ratio: float = 3.0,
385
+ qkv_bias: bool = True,
386
+ layer_scale: Optional[float] = None,
387
+ drop_rate: float = 0.,
388
+ proj_drop_rate: float = 0.,
389
+ attn_drop_rate: float = 0.,
390
+ drop_path_rate: float = 0.,
391
+ weight_init='',
392
+ act_layer: str = 'gelu',
393
+ norm_layer: str = 'layernorm2d',
394
+ norm_layer_cl: str = 'layernorm',
395
+ norm_eps: float = 1e-5,
396
+ ):
397
+ super().__init__()
398
+ act_layer = get_act_layer(act_layer)
399
+ norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps)
400
+ norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps)
401
+
402
+ img_size = to_2tuple(img_size)
403
+ feat_size = tuple(d // 4 for d in img_size) # stem reduction by 4
404
+ self.global_pool = global_pool
405
+ self.num_classes = num_classes
406
+ self.drop_rate = drop_rate
407
+ num_stages = len(depths)
408
+ self.num_features = self.head_hidden_size = int(embed_dim * 2 ** (num_stages - 1))
409
+ if window_size is not None:
410
+ window_size = to_ntuple(num_stages)(window_size)
411
+ else:
412
+ assert window_ratio is not None
413
+ window_size = tuple([(img_size[0] // r, img_size[1] // r) for r in to_ntuple(num_stages)(window_ratio)])
414
+
415
+ self.stem = Stem(
416
+ in_chs=in_chans,
417
+ out_chs=embed_dim,
418
+ act_layer=act_layer,
419
+ norm_layer=norm_layer
420
+ )
421
+
422
+ dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
423
+ stages = []
424
+ for i in range(num_stages):
425
+ last_stage = i == num_stages - 1
426
+ stage_scale = 2 ** max(i - 1, 0)
427
+ stages.append(GlobalContextVitStage(
428
+ dim=embed_dim * stage_scale,
429
+ depth=depths[i],
430
+ num_heads=num_heads[i],
431
+ feat_size=(feat_size[0] // stage_scale, feat_size[1] // stage_scale),
432
+ window_size=window_size[i],
433
+ downsample=i != 0,
434
+ stage_norm=last_stage,
435
+ mlp_ratio=mlp_ratio,
436
+ qkv_bias=qkv_bias,
437
+ layer_scale=layer_scale,
438
+ proj_drop=proj_drop_rate,
439
+ attn_drop=attn_drop_rate,
440
+ drop_path=dpr[i],
441
+ act_layer=act_layer,
442
+ norm_layer=norm_layer,
443
+ norm_layer_cl=norm_layer_cl,
444
+ ))
445
+ self.stages = nn.Sequential(*stages)
446
+
447
+ # Classifier head
448
+ self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
449
+
450
+ if weight_init:
451
+ named_apply(partial(self._init_weights, scheme=weight_init), self)
452
+
453
+ def _init_weights(self, module, name, scheme='vit'):
454
+ # note Conv2d left as default init
455
+ if scheme == 'vit':
456
+ if isinstance(module, nn.Linear):
457
+ nn.init.xavier_uniform_(module.weight)
458
+ if module.bias is not None:
459
+ if 'mlp' in name:
460
+ nn.init.normal_(module.bias, std=1e-6)
461
+ else:
462
+ nn.init.zeros_(module.bias)
463
+ else:
464
+ if isinstance(module, nn.Linear):
465
+ nn.init.normal_(module.weight, std=.02)
466
+ if module.bias is not None:
467
+ nn.init.zeros_(module.bias)
468
+
469
+ @torch.jit.ignore
470
+ def no_weight_decay(self):
471
+ return {
472
+ k for k, _ in self.named_parameters()
473
+ if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
474
+
475
+ @torch.jit.ignore
476
+ def group_matcher(self, coarse=False):
477
+ matcher = dict(
478
+ stem=r'^stem', # stem and embed
479
+ blocks=r'^stages\.(\d+)'
480
+ )
481
+ return matcher
482
+
483
+ @torch.jit.ignore
484
+ def set_grad_checkpointing(self, enable=True):
485
+ for s in self.stages:
486
+ s.grad_checkpointing = enable
487
+
488
+ @torch.jit.ignore
489
+ def get_classifier(self) -> nn.Module:
490
+ return self.head.fc
491
+
492
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
493
+ self.num_classes = num_classes
494
+ if global_pool is None:
495
+ global_pool = self.head.global_pool.pool_type
496
+ self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
497
+
498
+ def forward_features(self, x: torch.Tensor) -> torch.Tensor:
499
+ x = self.stem(x)
500
+ x = self.stages(x)
501
+ return x
502
+
503
+ def forward_head(self, x, pre_logits: bool = False):
504
+ return self.head(x, pre_logits=pre_logits)
505
+
506
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
507
+ x = self.forward_features(x)
508
+ x = self.forward_head(x)
509
+ return x
510
+
511
+
512
+ def _create_gcvit(variant, pretrained=False, **kwargs):
513
+ if kwargs.get('features_only', None):
514
+ raise RuntimeError('features_only not implemented for Vision Transformer models.')
515
+ model = build_model_with_cfg(GlobalContextVit, variant, pretrained, **kwargs)
516
+ return model
517
+
518
+
519
+ def _cfg(url='', **kwargs):
520
+ return {
521
+ 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
522
+ 'crop_pct': 0.875, 'interpolation': 'bicubic',
523
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
524
+ 'first_conv': 'stem.conv1', 'classifier': 'head.fc',
525
+ 'fixed_input_size': True,
526
+ **kwargs
527
+ }
528
+
529
+
530
+ default_cfgs = generate_default_cfgs({
531
+ 'gcvit_xxtiny.in1k': _cfg(
532
+ url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'),
533
+ 'gcvit_xtiny.in1k': _cfg(
534
+ url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'),
535
+ 'gcvit_tiny.in1k': _cfg(
536
+ url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'),
537
+ 'gcvit_small.in1k': _cfg(
538
+ url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'),
539
+ 'gcvit_base.in1k': _cfg(
540
+ url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth'),
541
+ })
542
+
543
+
544
+ @register_model
545
+ def gcvit_xxtiny(pretrained=False, **kwargs) -> GlobalContextVit:
546
+ model_kwargs = dict(
547
+ depths=(2, 2, 6, 2),
548
+ num_heads=(2, 4, 8, 16),
549
+ **kwargs)
550
+ return _create_gcvit('gcvit_xxtiny', pretrained=pretrained, **model_kwargs)
551
+
552
+
553
+ @register_model
554
+ def gcvit_xtiny(pretrained=False, **kwargs) -> GlobalContextVit:
555
+ model_kwargs = dict(
556
+ depths=(3, 4, 6, 5),
557
+ num_heads=(2, 4, 8, 16),
558
+ **kwargs)
559
+ return _create_gcvit('gcvit_xtiny', pretrained=pretrained, **model_kwargs)
560
+
561
+
562
+ @register_model
563
+ def gcvit_tiny(pretrained=False, **kwargs) -> GlobalContextVit:
564
+ model_kwargs = dict(
565
+ depths=(3, 4, 19, 5),
566
+ num_heads=(2, 4, 8, 16),
567
+ **kwargs)
568
+ return _create_gcvit('gcvit_tiny', pretrained=pretrained, **model_kwargs)
569
+
570
+
571
+ @register_model
572
+ def gcvit_small(pretrained=False, **kwargs) -> GlobalContextVit:
573
+ model_kwargs = dict(
574
+ depths=(3, 4, 19, 5),
575
+ num_heads=(3, 6, 12, 24),
576
+ embed_dim=96,
577
+ mlp_ratio=2,
578
+ layer_scale=1e-5,
579
+ **kwargs)
580
+ return _create_gcvit('gcvit_small', pretrained=pretrained, **model_kwargs)
581
+
582
+
583
+ @register_model
584
+ def gcvit_base(pretrained=False, **kwargs) -> GlobalContextVit:
585
+ model_kwargs = dict(
586
+ depths=(3, 4, 19, 5),
587
+ num_heads=(4, 8, 16, 32),
588
+ embed_dim=128,
589
+ mlp_ratio=2,
590
+ layer_scale=1e-5,
591
+ **kwargs)
592
+ return _create_gcvit('gcvit_base', pretrained=pretrained, **model_kwargs)
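For completeness, a minimal usage sketch for the registered variants above (assumes a timm build containing this file; set pretrained=True to pull the NVIDIA weights listed in default_cfgs):

import timm
import torch

model = timm.create_model('gcvit_tiny', pretrained=False, num_classes=10).eval()
x = torch.randn(1, 3, 224, 224)        # cfgs are fixed_input_size=True at 224x224
with torch.no_grad():
    logits = model(x)                  # -> torch.Size([1, 10])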
pytorch-image-models/timm/models/ghostnet.py ADDED
@@ -0,0 +1,433 @@
1
+ """
2
+ An implementation of GhostNet & GhostNetV2 Models as defined in:
3
+ GhostNet: More Features from Cheap Operations. https://arxiv.org/abs/1911.11907
4
+ GhostNetV2: Enhance Cheap Operation with Long-Range Attention. https://proceedings.neurips.cc/paper_files/paper/2022/file/40b60852a4abdaa696b5a1a78da34635-Paper-Conference.pdf
5
+
6
+ Training scripts & original model code:
7
+ Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch
8
+ Original model: https://github.com/huawei-noah/Efficient-AI-Backbones/blob/master/ghostnetv2_pytorch/model/ghostnetv2_torch.py
9
+ """
10
+ import math
11
+ from functools import partial
12
+ from typing import Optional
13
+
14
+ import torch
15
+ import torch.nn as nn
16
+ import torch.nn.functional as F
17
+
18
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
19
+ from timm.layers import SelectAdaptivePool2d, Linear, make_divisible
20
+ from ._builder import build_model_with_cfg
21
+ from ._efficientnet_blocks import SqueezeExcite, ConvBnAct
22
+ from ._manipulate import checkpoint_seq
23
+ from ._registry import register_model, generate_default_cfgs
24
+
25
+ __all__ = ['GhostNet']
26
+
27
+
28
+ _SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4))
29
+
30
+
31
+ class GhostModule(nn.Module):
32
+ def __init__(
33
+ self,
34
+ in_chs,
35
+ out_chs,
36
+ kernel_size=1,
37
+ ratio=2,
38
+ dw_size=3,
39
+ stride=1,
40
+ use_act=True,
41
+ act_layer=nn.ReLU,
42
+ ):
43
+ super(GhostModule, self).__init__()
44
+ self.out_chs = out_chs
45
+ init_chs = math.ceil(out_chs / ratio)
46
+ new_chs = init_chs * (ratio - 1)
47
+
48
+ self.primary_conv = nn.Sequential(
49
+ nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False),
50
+ nn.BatchNorm2d(init_chs),
51
+ act_layer(inplace=True) if use_act else nn.Identity(),
52
+ )
53
+
54
+ self.cheap_operation = nn.Sequential(
55
+ nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False),
56
+ nn.BatchNorm2d(new_chs),
57
+ act_layer(inplace=True) if use_act else nn.Identity(),
58
+ )
59
+
60
+ def forward(self, x):
61
+ x1 = self.primary_conv(x)
62
+ x2 = self.cheap_operation(x1)
63
+ out = torch.cat([x1, x2], dim=1)
64
+ return out[:, :self.out_chs, :, :]
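The channel split above is easiest to see with a small worked example (out_chs=64, ratio=2; numbers arbitrary):

import math

out_chs, ratio = 64, 2
init_chs = math.ceil(out_chs / ratio)     # 32 channels from the 1x1 primary_conv
new_chs = init_chs * (ratio - 1)          # 32 more from the cheap depthwise 3x3 on those 32
assert init_chs + new_chs >= out_chs      # concat is at least out_chs wide, then sliced to 64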
65
+
66
+
67
+ class GhostModuleV2(nn.Module):
68
+ def __init__(
69
+ self,
70
+ in_chs,
71
+ out_chs,
72
+ kernel_size=1,
73
+ ratio=2,
74
+ dw_size=3,
75
+ stride=1,
76
+ use_act=True,
77
+ act_layer=nn.ReLU,
78
+ ):
79
+ super().__init__()
80
+ self.gate_fn = nn.Sigmoid()
81
+ self.out_chs = out_chs
82
+ init_chs = math.ceil(out_chs / ratio)
83
+ new_chs = init_chs * (ratio - 1)
84
+ self.primary_conv = nn.Sequential(
85
+ nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False),
86
+ nn.BatchNorm2d(init_chs),
87
+ act_layer(inplace=True) if use_act else nn.Identity(),
88
+ )
89
+ self.cheap_operation = nn.Sequential(
90
+ nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size // 2, groups=init_chs, bias=False),
91
+ nn.BatchNorm2d(new_chs),
92
+ act_layer(inplace=True) if use_act else nn.Identity(),
93
+ )
94
+ self.short_conv = nn.Sequential(
95
+ nn.Conv2d(in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False),
96
+ nn.BatchNorm2d(out_chs),
97
+ nn.Conv2d(out_chs, out_chs, kernel_size=(1, 5), stride=1, padding=(0, 2), groups=out_chs, bias=False),
98
+ nn.BatchNorm2d(out_chs),
99
+ nn.Conv2d(out_chs, out_chs, kernel_size=(5, 1), stride=1, padding=(2, 0), groups=out_chs, bias=False),
100
+ nn.BatchNorm2d(out_chs),
101
+ )
102
+
103
+ def forward(self, x):
104
+ res = self.short_conv(F.avg_pool2d(x, kernel_size=2, stride=2))
105
+ x1 = self.primary_conv(x)
106
+ x2 = self.cheap_operation(x1)
107
+ out = torch.cat([x1, x2], dim=1)
108
+ return out[:, :self.out_chs, :, :] * F.interpolate(
109
+ self.gate_fn(res), size=(out.shape[-2], out.shape[-1]), mode='nearest')
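A brief shape trace of the V2 attention branch above, using stand-in tensors (channel counts arbitrary): the gate is computed at half resolution and upsampled back before re-weighting the ghost features.

import torch
import torch.nn.functional as F

x = torch.randn(1, 16, 32, 32)
pooled = F.avg_pool2d(x, kernel_size=2, stride=2)     # 32x32 -> 16x16, input to short_conv
res = torch.randn(1, 24, 16, 16)                      # stands in for short_conv(pooled)
out = torch.randn(1, 24, 32, 32)                      # stands in for the ghost concat
gate = F.interpolate(torch.sigmoid(res), size=out.shape[-2:], mode='nearest')
y = out * gate                                        # element-wise gating at full resolution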
110
+
111
+
112
+ class GhostBottleneck(nn.Module):
113
+ """ Ghost bottleneck w/ optional SE"""
114
+
115
+ def __init__(
116
+ self,
117
+ in_chs,
118
+ mid_chs,
119
+ out_chs,
120
+ dw_kernel_size=3,
121
+ stride=1,
122
+ act_layer=nn.ReLU,
123
+ se_ratio=0.,
124
+ mode='original',
125
+ ):
126
+ super(GhostBottleneck, self).__init__()
127
+ has_se = se_ratio is not None and se_ratio > 0.
128
+ self.stride = stride
129
+
130
+ # Point-wise expansion
131
+ if mode == 'original':
132
+ self.ghost1 = GhostModule(in_chs, mid_chs, use_act=True, act_layer=act_layer)
133
+ else:
134
+ self.ghost1 = GhostModuleV2(in_chs, mid_chs, use_act=True, act_layer=act_layer)
135
+
136
+ # Depth-wise convolution
137
+ if self.stride > 1:
138
+ self.conv_dw = nn.Conv2d(
139
+ mid_chs, mid_chs, dw_kernel_size, stride=stride,
140
+ padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False)
141
+ self.bn_dw = nn.BatchNorm2d(mid_chs)
142
+ else:
143
+ self.conv_dw = None
144
+ self.bn_dw = None
145
+
146
+ # Squeeze-and-excitation
147
+ self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None
148
+
149
+ # Point-wise linear projection
150
+ self.ghost2 = GhostModule(mid_chs, out_chs, use_act=False)
151
+
152
+ # shortcut
153
+ if in_chs == out_chs and self.stride == 1:
154
+ self.shortcut = nn.Sequential()
155
+ else:
156
+ self.shortcut = nn.Sequential(
157
+ nn.Conv2d(
158
+ in_chs, in_chs, dw_kernel_size, stride=stride,
159
+ padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False),
160
+ nn.BatchNorm2d(in_chs),
161
+ nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
162
+ nn.BatchNorm2d(out_chs),
163
+ )
164
+
165
+ def forward(self, x):
166
+ shortcut = x
167
+
168
+ # 1st ghost module (point-wise expansion)
169
+ x = self.ghost1(x)
170
+
171
+ # Depth-wise convolution
172
+ if self.conv_dw is not None:
173
+ x = self.conv_dw(x)
174
+ x = self.bn_dw(x)
175
+
176
+ # Squeeze-and-excitation
177
+ if self.se is not None:
178
+ x = self.se(x)
179
+
180
+ # 2nd ghost bottleneck
181
+ x = self.ghost2(x)
182
+
183
+ x += self.shortcut(shortcut)
184
+ return x
185
+
186
+
187
+ class GhostNet(nn.Module):
188
+ def __init__(
189
+ self,
190
+ cfgs,
191
+ num_classes=1000,
192
+ width=1.0,
193
+ in_chans=3,
194
+ output_stride=32,
195
+ global_pool='avg',
196
+ drop_rate=0.2,
197
+ version='v1',
198
+ ):
199
+ super(GhostNet, self).__init__()
200
+ # setting of inverted residual blocks
201
+ assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported'
202
+ self.cfgs = cfgs
203
+ self.num_classes = num_classes
204
+ self.drop_rate = drop_rate
205
+ self.grad_checkpointing = False
206
+ self.feature_info = []
207
+
208
+ # building first layer
209
+ stem_chs = make_divisible(16 * width, 4)
210
+ self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False)
211
+ self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module='conv_stem'))
212
+ self.bn1 = nn.BatchNorm2d(stem_chs)
213
+ self.act1 = nn.ReLU(inplace=True)
214
+ prev_chs = stem_chs
215
+
216
+ # building inverted residual blocks
217
+ stages = nn.ModuleList([])
218
+ stage_idx = 0
219
+ layer_idx = 0
220
+ net_stride = 2
221
+ for cfg in self.cfgs:
222
+ layers = []
223
+ s = 1
224
+ for k, exp_size, c, se_ratio, s in cfg:
225
+ out_chs = make_divisible(c * width, 4)
226
+ mid_chs = make_divisible(exp_size * width, 4)
227
+ layer_kwargs = {}
228
+ if version == 'v2' and layer_idx > 1:
229
+ layer_kwargs['mode'] = 'attn'
230
+ layers.append(GhostBottleneck(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, **layer_kwargs))
231
+ prev_chs = out_chs
232
+ layer_idx += 1
233
+ if s > 1:
234
+ net_stride *= 2
235
+ self.feature_info.append(dict(
236
+ num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}'))
237
+ stages.append(nn.Sequential(*layers))
238
+ stage_idx += 1
239
+
240
+ out_chs = make_divisible(exp_size * width, 4)
241
+ stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1)))
242
+ self.pool_dim = prev_chs = out_chs
243
+
244
+ self.blocks = nn.Sequential(*stages)
245
+
246
+ # building last several layers
247
+ self.num_features = prev_chs
248
+ self.head_hidden_size = out_chs = 1280
249
+ self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
250
+ self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True)
251
+ self.act2 = nn.ReLU(inplace=True)
252
+ self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
253
+ self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity()
254
+
255
+ # FIXME init
256
+
257
+ @torch.jit.ignore
258
+ def group_matcher(self, coarse=False):
259
+ matcher = dict(
260
+ stem=r'^conv_stem|bn1',
261
+ blocks=[
262
+ (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None),
263
+ (r'conv_head', (99999,))
264
+ ]
265
+ )
266
+ return matcher
267
+
268
+ @torch.jit.ignore
269
+ def set_grad_checkpointing(self, enable=True):
270
+ self.grad_checkpointing = enable
271
+
272
+ @torch.jit.ignore
273
+ def get_classifier(self) -> nn.Module:
274
+ return self.classifier
275
+
276
+ def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
277
+ self.num_classes = num_classes
278
+ # cannot meaningfully change pooling of efficient head after creation
279
+ self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
280
+ self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled
281
+ self.classifier = Linear(self.head_hidden_size, num_classes) if num_classes > 0 else nn.Identity()
282
+
283
+ def forward_features(self, x):
284
+ x = self.conv_stem(x)
285
+ x = self.bn1(x)
286
+ x = self.act1(x)
287
+ if self.grad_checkpointing and not torch.jit.is_scripting():
288
+ x = checkpoint_seq(self.blocks, x, flatten=True)
289
+ else:
290
+ x = self.blocks(x)
291
+ return x
292
+
293
+ def forward_head(self, x, pre_logits: bool = False):
294
+ x = self.global_pool(x)
295
+ x = self.conv_head(x)
296
+ x = self.act2(x)
297
+ x = self.flatten(x)
298
+ if self.drop_rate > 0.:
299
+ x = F.dropout(x, p=self.drop_rate, training=self.training)
300
+ return x if pre_logits else self.classifier(x)
301
+
302
+ def forward(self, x):
303
+ x = self.forward_features(x)
304
+ x = self.forward_head(x)
305
+ return x
306
+
307
+
308
+ def checkpoint_filter_fn(state_dict, model: nn.Module):
309
+ out_dict = {}
310
+ for k, v in state_dict.items():
311
+ if 'total' in k:
312
+ continue
313
+ out_dict[k] = v
314
+ return out_dict
315
+
316
+
317
+ def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs):
318
+ """
319
+ Constructs a GhostNet model
320
+ """
321
+ cfgs = [
322
+ # k, t, c, SE, s
323
+ # stage1
324
+ [[3, 16, 16, 0, 1]],
325
+ # stage2
326
+ [[3, 48, 24, 0, 2]],
327
+ [[3, 72, 24, 0, 1]],
328
+ # stage3
329
+ [[5, 72, 40, 0.25, 2]],
330
+ [[5, 120, 40, 0.25, 1]],
331
+ # stage4
332
+ [[3, 240, 80, 0, 2]],
333
+ [[3, 200, 80, 0, 1],
334
+ [3, 184, 80, 0, 1],
335
+ [3, 184, 80, 0, 1],
336
+ [3, 480, 112, 0.25, 1],
337
+ [3, 672, 112, 0.25, 1]
338
+ ],
339
+ # stage5
340
+ [[5, 672, 160, 0.25, 2]],
341
+ [[5, 960, 160, 0, 1],
342
+ [5, 960, 160, 0.25, 1],
343
+ [5, 960, 160, 0, 1],
344
+ [5, 960, 160, 0.25, 1]
345
+ ]
346
+ ]
347
+ model_kwargs = dict(
348
+ cfgs=cfgs,
349
+ width=width,
350
+ **kwargs,
351
+ )
352
+ return build_model_with_cfg(
353
+ GhostNet,
354
+ variant,
355
+ pretrained,
356
+ pretrained_filter_fn=checkpoint_filter_fn,
357
+ feature_cfg=dict(flatten_sequential=True),
358
+ **model_kwargs,
359
+ )
360
+
361
+
362
+ def _cfg(url='', **kwargs):
363
+ return {
364
+ 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
365
+ 'crop_pct': 0.875, 'interpolation': 'bicubic',
366
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
367
+ 'first_conv': 'conv_stem', 'classifier': 'classifier',
368
+ **kwargs
369
+ }
370
+
371
+
372
+ default_cfgs = generate_default_cfgs({
373
+ 'ghostnet_050.untrained': _cfg(),
374
+ 'ghostnet_100.in1k': _cfg(
375
+ hf_hub_id='timm/',
376
+ # url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth'
377
+ ),
378
+ 'ghostnet_130.untrained': _cfg(),
379
+ 'ghostnetv2_100.in1k': _cfg(
380
+ hf_hub_id='timm/',
381
+ # url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_10.pth.tar'
382
+ ),
383
+ 'ghostnetv2_130.in1k': _cfg(
384
+ hf_hub_id='timm/',
385
+ # url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_13.pth.tar'
386
+ ),
387
+ 'ghostnetv2_160.in1k': _cfg(
388
+ hf_hub_id='timm/',
389
+ # url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_16.pth.tar'
390
+ ),
391
+ })
392
+
393
+
394
+ @register_model
395
+ def ghostnet_050(pretrained=False, **kwargs) -> GhostNet:
396
+ """ GhostNet-0.5x """
397
+ model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs)
398
+ return model
399
+
400
+
401
+ @register_model
402
+ def ghostnet_100(pretrained=False, **kwargs) -> GhostNet:
403
+ """ GhostNet-1.0x """
404
+ model = _create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs)
405
+ return model
406
+
407
+
408
+ @register_model
409
+ def ghostnet_130(pretrained=False, **kwargs) -> GhostNet:
410
+ """ GhostNet-1.3x """
411
+ model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs)
412
+ return model
413
+
414
+
415
+ @register_model
416
+ def ghostnetv2_100(pretrained=False, **kwargs) -> GhostNet:
417
+ """ GhostNetV2-1.0x """
418
+ model = _create_ghostnet('ghostnetv2_100', width=1.0, pretrained=pretrained, version='v2', **kwargs)
419
+ return model
420
+
421
+
422
+ @register_model
423
+ def ghostnetv2_130(pretrained=False, **kwargs) -> GhostNet:
424
+ """ GhostNetV2-1.3x """
425
+ model = _create_ghostnet('ghostnetv2_130', width=1.3, pretrained=pretrained, version='v2', **kwargs)
426
+ return model
427
+
428
+
429
+ @register_model
430
+ def ghostnetv2_160(pretrained=False, **kwargs) -> GhostNet:
431
+ """ GhostNetV2-1.6x """
432
+ model = _create_ghostnet('ghostnetv2_160', width=1.6, pretrained=pretrained, version='v2', **kwargs)
433
+ return model
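Because the builder above passes feature_cfg=dict(flatten_sequential=True), these variants also work as feature backbones; a hedged sketch (assumes timm is installed):

import timm
import torch

backbone = timm.create_model('ghostnet_100', pretrained=False, features_only=True)
feats = backbone(torch.randn(1, 3, 224, 224))
print(backbone.feature_info.channels())    # channels of each returned map
print(backbone.feature_info.reduction())   # matching strides: 2, 4, 8, 16, 32
print([tuple(f.shape) for f in feats])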
pytorch-image-models/timm/models/hardcorenas.py ADDED
@@ -0,0 +1,156 @@
1
+ from functools import partial
2
+
3
+ import torch.nn as nn
4
+
5
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
6
+ from ._builder import build_model_with_cfg
7
+ from ._builder import pretrained_cfg_for_features
8
+ from ._efficientnet_blocks import SqueezeExcite
9
+ from ._efficientnet_builder import decode_arch_def, resolve_act_layer, resolve_bn_args, round_channels
10
+ from ._registry import register_model, generate_default_cfgs
11
+ from .mobilenetv3 import MobileNetV3, MobileNetV3Features
12
+
13
+ __all__ = [] # model_registry will add each entrypoint fn to this
14
+
15
+
16
+ def _gen_hardcorenas(pretrained, variant, arch_def, **kwargs):
17
+ """Creates a hardcorenas model
18
+
19
+ Ref impl: https://github.com/Alibaba-MIIL/HardCoReNAS
20
+ Paper: https://arxiv.org/abs/2102.11646
21
+
22
+ """
23
+ num_features = 1280
24
+ se_layer = partial(SqueezeExcite, gate_layer='hard_sigmoid', force_act_layer=nn.ReLU, rd_round_fn=round_channels)
25
+ model_kwargs = dict(
26
+ block_args=decode_arch_def(arch_def),
27
+ num_features=num_features,
28
+ stem_size=32,
29
+ norm_layer=partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
30
+ act_layer=resolve_act_layer(kwargs, 'hard_swish'),
31
+ se_layer=se_layer,
32
+ **kwargs,
33
+ )
34
+
35
+ features_only = False
36
+ model_cls = MobileNetV3
37
+ kwargs_filter = None
38
+ if model_kwargs.pop('features_only', False):
39
+ features_only = True
40
+ kwargs_filter = ('num_classes', 'num_features', 'global_pool', 'head_conv', 'head_bias')
41
+ model_cls = MobileNetV3Features
42
+ model = build_model_with_cfg(
43
+ model_cls,
44
+ variant,
45
+ pretrained,
46
+ pretrained_strict=not features_only,
47
+ kwargs_filter=kwargs_filter,
48
+ **model_kwargs,
49
+ )
50
+ if features_only:
51
+ model.default_cfg = pretrained_cfg_for_features(model.default_cfg)
52
+ return model
53
+
54
+
55
+ def _cfg(url='', **kwargs):
56
+ return {
57
+ 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
58
+ 'crop_pct': 0.875, 'interpolation': 'bilinear',
59
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
60
+ 'first_conv': 'conv_stem', 'classifier': 'classifier',
61
+ **kwargs
62
+ }
63
+
64
+
65
+ default_cfgs = generate_default_cfgs({
66
+ 'hardcorenas_a.miil_green_in1k': _cfg(hf_hub_id='timm/'),
67
+ 'hardcorenas_b.miil_green_in1k': _cfg(hf_hub_id='timm/'),
68
+ 'hardcorenas_c.miil_green_in1k': _cfg(hf_hub_id='timm/'),
69
+ 'hardcorenas_d.miil_green_in1k': _cfg(hf_hub_id='timm/'),
70
+ 'hardcorenas_e.miil_green_in1k': _cfg(hf_hub_id='timm/'),
71
+ 'hardcorenas_f.miil_green_in1k': _cfg(hf_hub_id='timm/'),
72
+ })
73
+
74
+
75
+ @register_model
76
+ def hardcorenas_a(pretrained=False, **kwargs) -> MobileNetV3:
77
+ """ hardcorenas_A """
78
+ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
79
+ ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
80
+ ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25'],
81
+ ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25'],
82
+ ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
83
+ model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_a', arch_def=arch_def, **kwargs)
84
+ return model
85
+
86
+
87
+ @register_model
88
+ def hardcorenas_b(pretrained=False, **kwargs) -> MobileNetV3:
89
+ """ hardcorenas_B """
90
+ arch_def = [['ds_r1_k3_s1_e1_c16_nre'],
91
+ ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25', 'ir_r1_k3_s1_e3_c24_nre'],
92
+ ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre'],
93
+ ['ir_r1_k5_s2_e3_c80', 'ir_r1_k5_s1_e3_c80', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
94
+ ['ir_r1_k5_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
95
+ ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
96
+ ['cn_r1_k1_s1_c960']]
97
+ model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_b', arch_def=arch_def, **kwargs)
98
+ return model
99
+
100
+
101
+ @register_model
102
+ def hardcorenas_c(pretrained=False, **kwargs) -> MobileNetV3:
103
+ """ hardcorenas_C """
104
+ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
105
+ ['ir_r1_k5_s2_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre', 'ir_r1_k5_s1_e3_c40_nre',
106
+ 'ir_r1_k5_s1_e3_c40_nre'],
107
+ ['ir_r1_k5_s2_e4_c80', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80', 'ir_r1_k3_s1_e3_c80'],
108
+ ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112', 'ir_r1_k3_s1_e3_c112'],
109
+ ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e3_c192_se0.25'],
110
+ ['cn_r1_k1_s1_c960']]
111
+ model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_c', arch_def=arch_def, **kwargs)
112
+ return model
113
+
114
+
115
+ @register_model
116
+ def hardcorenas_d(pretrained=False, **kwargs) -> MobileNetV3:
117
+ """ hardcorenas_D """
118
+ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
119
+ ['ir_r1_k5_s2_e3_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k3_s1_e3_c40_nre_se0.25'],
120
+ ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
121
+ 'ir_r1_k3_s1_e3_c80_se0.25'],
122
+ ['ir_r1_k3_s1_e4_c112_se0.25', 'ir_r1_k5_s1_e4_c112_se0.25', 'ir_r1_k3_s1_e3_c112_se0.25',
123
+ 'ir_r1_k5_s1_e3_c112_se0.25'],
124
+ ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
125
+ 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
126
+ model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_d', arch_def=arch_def, **kwargs)
127
+ return model
128
+
129
+
130
+ @register_model
131
+ def hardcorenas_e(pretrained=False, **kwargs) -> MobileNetV3:
132
+ """ hardcorenas_E """
133
+ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
134
+ ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25', 'ir_r1_k5_s1_e4_c40_nre_se0.25',
135
+ 'ir_r1_k3_s1_e3_c40_nre_se0.25'], ['ir_r1_k5_s2_e4_c80_se0.25', 'ir_r1_k3_s1_e6_c80_se0.25'],
136
+ ['ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
137
+ 'ir_r1_k5_s1_e3_c112_se0.25'],
138
+ ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25',
139
+ 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
140
+ model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_e', arch_def=arch_def, **kwargs)
141
+ return model
142
+
143
+
144
+ @register_model
145
+ def hardcorenas_f(pretrained=False, **kwargs) -> MobileNetV3:
146
+ """ hardcorenas_F """
147
+ arch_def = [['ds_r1_k3_s1_e1_c16_nre'], ['ir_r1_k5_s2_e3_c24_nre_se0.25', 'ir_r1_k5_s1_e3_c24_nre_se0.25'],
148
+ ['ir_r1_k5_s2_e6_c40_nre_se0.25', 'ir_r1_k5_s1_e6_c40_nre_se0.25'],
149
+ ['ir_r1_k5_s2_e6_c80_se0.25', 'ir_r1_k5_s1_e6_c80_se0.25', 'ir_r1_k3_s1_e3_c80_se0.25',
150
+ 'ir_r1_k3_s1_e3_c80_se0.25'],
151
+ ['ir_r1_k3_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25', 'ir_r1_k5_s1_e6_c112_se0.25',
152
+ 'ir_r1_k3_s1_e3_c112_se0.25'],
153
+ ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k3_s1_e6_c192_se0.25',
154
+ 'ir_r1_k3_s1_e6_c192_se0.25'], ['cn_r1_k1_s1_c960']]
155
+ model = _gen_hardcorenas(pretrained=pretrained, variant='hardcorenas_f', arch_def=arch_def, **kwargs)
156
+ return model
pytorch-image-models/timm/models/helpers.py ADDED
@@ -0,0 +1,7 @@
1
+ from ._builder import *
2
+ from ._helpers import *
3
+ from ._manipulate import *
4
+ from ._prune import *
5
+
6
+ import warnings
7
+ warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", FutureWarning)
pytorch-image-models/timm/models/hgnet.py ADDED
@@ -0,0 +1,738 @@
1
+ """ PP-HGNet (V1 & V2)
2
+
3
+ Reference:
4
+ https://github.com/PaddlePaddle/PaddleClas/blob/develop/docs/zh_CN/models/ImageNet1k/PP-HGNetV2.md
5
+ The Paddle implementation of PP-HGNet (https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/docs/en/models/PP-HGNet_en.md)
6
+ PP-HGNet: https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/ppcls/arch/backbone/legendary_models/pp_hgnet.py
7
+ PP-HGNetv2: https://github.com/PaddlePaddle/PaddleClas/blob/release/2.5.1/ppcls/arch/backbone/legendary_models/pp_hgnet_v2.py
8
+ """
9
+ from typing import Dict, Optional
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+
15
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
16
+ from timm.layers import SelectAdaptivePool2d, DropPath, create_conv2d
17
+ from ._builder import build_model_with_cfg
18
+ from ._registry import register_model, generate_default_cfgs
19
+ from ._manipulate import checkpoint_seq
20
+
21
+ __all__ = ['HighPerfGpuNet']
22
+
23
+
24
+ class LearnableAffineBlock(nn.Module):
25
+ def __init__(
26
+ self,
27
+ scale_value=1.0,
28
+ bias_value=0.0
29
+ ):
30
+ super().__init__()
31
+ self.scale = nn.Parameter(torch.tensor([scale_value]), requires_grad=True)
32
+ self.bias = nn.Parameter(torch.tensor([bias_value]), requires_grad=True)
33
+
34
+ def forward(self, x):
35
+ return self.scale * x + self.bias
36
+
37
+
38
+ class ConvBNAct(nn.Module):
39
+ def __init__(
40
+ self,
41
+ in_chs,
42
+ out_chs,
43
+ kernel_size,
44
+ stride=1,
45
+ groups=1,
46
+ padding='',
47
+ use_act=True,
48
+ use_lab=False
49
+ ):
50
+ super().__init__()
51
+ self.use_act = use_act
52
+ self.use_lab = use_lab
53
+ self.conv = create_conv2d(
54
+ in_chs,
55
+ out_chs,
56
+ kernel_size,
57
+ stride=stride,
58
+ padding=padding,
59
+ groups=groups,
60
+ )
61
+ self.bn = nn.BatchNorm2d(out_chs)
62
+ if self.use_act:
63
+ self.act = nn.ReLU()
64
+ else:
65
+ self.act = nn.Identity()
66
+ if self.use_act and self.use_lab:
67
+ self.lab = LearnableAffineBlock()
68
+ else:
69
+ self.lab = nn.Identity()
70
+
71
+ def forward(self, x):
72
+ x = self.conv(x)
73
+ x = self.bn(x)
74
+ x = self.act(x)
75
+ x = self.lab(x)
76
+ return x
77
+
78
+
79
+ class LightConvBNAct(nn.Module):
80
+ def __init__(
81
+ self,
82
+ in_chs,
83
+ out_chs,
84
+ kernel_size,
85
+ groups=1,
86
+ use_lab=False
87
+ ):
88
+ super().__init__()
89
+ self.conv1 = ConvBNAct(
90
+ in_chs,
91
+ out_chs,
92
+ kernel_size=1,
93
+ use_act=False,
94
+ use_lab=use_lab,
95
+ )
96
+ self.conv2 = ConvBNAct(
97
+ out_chs,
98
+ out_chs,
99
+ kernel_size=kernel_size,
100
+ groups=out_chs,
101
+ use_act=True,
102
+ use_lab=use_lab,
103
+ )
104
+
105
+ def forward(self, x):
106
+ x = self.conv1(x)
107
+ x = self.conv2(x)
108
+ return x
109
+
110
+
111
+ class EseModule(nn.Module):
112
+ def __init__(self, chs):
113
+ super().__init__()
114
+ self.conv = nn.Conv2d(
115
+ chs,
116
+ chs,
117
+ kernel_size=1,
118
+ stride=1,
119
+ padding=0,
120
+ )
121
+ self.sigmoid = nn.Sigmoid()
122
+
123
+ def forward(self, x):
124
+ identity = x
125
+ x = x.mean((2, 3), keepdim=True)
126
+ x = self.conv(x)
127
+ x = self.sigmoid(x)
128
+ return torch.mul(identity, x)
129
+
130
+
131
+ class StemV1(nn.Module):
132
+ # for PP-HGNet
133
+ def __init__(self, stem_chs):
134
+ super().__init__()
135
+ self.stem = nn.Sequential(*[
136
+ ConvBNAct(
137
+ stem_chs[i],
138
+ stem_chs[i + 1],
139
+ kernel_size=3,
140
+ stride=2 if i == 0 else 1) for i in range(
141
+ len(stem_chs) - 1)
142
+ ])
143
+ self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
144
+
145
+ def forward(self, x):
146
+ x = self.stem(x)
147
+ x = self.pool(x)
148
+ return x
149
+
150
+
151
+ class StemV2(nn.Module):
152
+ # for PP-HGNetv2
153
+ def __init__(self, in_chs, mid_chs, out_chs, use_lab=False):
154
+ super().__init__()
155
+ self.stem1 = ConvBNAct(
156
+ in_chs,
157
+ mid_chs,
158
+ kernel_size=3,
159
+ stride=2,
160
+ use_lab=use_lab,
161
+ )
162
+ self.stem2a = ConvBNAct(
163
+ mid_chs,
164
+ mid_chs // 2,
165
+ kernel_size=2,
166
+ stride=1,
167
+ use_lab=use_lab,
168
+ )
169
+ self.stem2b = ConvBNAct(
170
+ mid_chs // 2,
171
+ mid_chs,
172
+ kernel_size=2,
173
+ stride=1,
174
+ use_lab=use_lab,
175
+ )
176
+ self.stem3 = ConvBNAct(
177
+ mid_chs * 2,
178
+ mid_chs,
179
+ kernel_size=3,
180
+ stride=2,
181
+ use_lab=use_lab,
182
+ )
183
+ self.stem4 = ConvBNAct(
184
+ mid_chs,
185
+ out_chs,
186
+ kernel_size=1,
187
+ stride=1,
188
+ use_lab=use_lab,
189
+ )
190
+ self.pool = nn.MaxPool2d(kernel_size=2, stride=1, ceil_mode=True)
191
+
192
+ def forward(self, x):
193
+ x = self.stem1(x)
194
+ x = F.pad(x, (0, 1, 0, 1))
195
+ x2 = self.stem2a(x)
196
+ x2 = F.pad(x2, (0, 1, 0, 1))
197
+ x2 = self.stem2b(x2)
198
+ x1 = self.pool(x)
199
+ x = torch.cat([x1, x2], dim=1)
200
+ x = self.stem3(x)
201
+ x = self.stem4(x)
202
+ return x
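A rough shape trace of the V2 stem above with the hgnetv2_b0 stem widths (assumes StemV2 is importable from timm.models.hgnet): the padded 2x2 convs and the stride-1 max-pool branch keep 112x112, so the overall stem stride stays 4, matching StemV1.

import torch
from timm.models.hgnet import StemV2

stem = StemV2(in_chs=3, mid_chs=16, out_chs=16)     # hgnetv2_b0: stem_chs = [16, 16]
y = stem(torch.randn(1, 3, 224, 224))
print(y.shape)                                      # torch.Size([1, 16, 56, 56])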
203
+
204
+
205
+ class HighPerfGpuBlock(nn.Module):
206
+ def __init__(
207
+ self,
208
+ in_chs,
209
+ mid_chs,
210
+ out_chs,
211
+ layer_num,
212
+ kernel_size=3,
213
+ residual=False,
214
+ light_block=False,
215
+ use_lab=False,
216
+ agg='ese',
217
+ drop_path=0.,
218
+ ):
219
+ super().__init__()
220
+ self.residual = residual
221
+
222
+ self.layers = nn.ModuleList()
223
+ for i in range(layer_num):
224
+ if light_block:
225
+ self.layers.append(
226
+ LightConvBNAct(
227
+ in_chs if i == 0 else mid_chs,
228
+ mid_chs,
229
+ kernel_size=kernel_size,
230
+ use_lab=use_lab,
231
+ )
232
+ )
233
+ else:
234
+ self.layers.append(
235
+ ConvBNAct(
236
+ in_chs if i == 0 else mid_chs,
237
+ mid_chs,
238
+ kernel_size=kernel_size,
239
+ stride=1,
240
+ use_lab=use_lab,
241
+ )
242
+ )
243
+
244
+ # feature aggregation
245
+ total_chs = in_chs + layer_num * mid_chs
246
+ if agg == 'se':
247
+ aggregation_squeeze_conv = ConvBNAct(
248
+ total_chs,
249
+ out_chs // 2,
250
+ kernel_size=1,
251
+ stride=1,
252
+ use_lab=use_lab,
253
+ )
254
+ aggregation_excitation_conv = ConvBNAct(
255
+ out_chs // 2,
256
+ out_chs,
257
+ kernel_size=1,
258
+ stride=1,
259
+ use_lab=use_lab,
260
+ )
261
+ self.aggregation = nn.Sequential(
262
+ aggregation_squeeze_conv,
263
+ aggregation_excitation_conv,
264
+ )
265
+ else:
266
+ aggregation_conv = ConvBNAct(
267
+ total_chs,
268
+ out_chs,
269
+ kernel_size=1,
270
+ stride=1,
271
+ use_lab=use_lab,
272
+ )
273
+ att = EseModule(out_chs)
274
+ self.aggregation = nn.Sequential(
275
+ aggregation_conv,
276
+ att,
277
+ )
278
+
279
+ self.drop_path = DropPath(drop_path) if drop_path else nn.Identity()
280
+
281
+ def forward(self, x):
282
+ identity = x
283
+ output = [x]
284
+ for layer in self.layers:
285
+ x = layer(x)
286
+ output.append(x)
287
+ x = torch.cat(output, dim=1)
288
+ x = self.aggregation(x)
289
+ if self.residual:
290
+ x = self.drop_path(x) + identity
291
+ return x
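A small worked example of the channel bookkeeping above, using the hgnetv2_b0 stage3 numbers: every intermediate output is kept and concatenated with the block input before the 1x1 aggregation.

in_chs, mid_chs, layer_num, out_chs = 256, 64, 3, 512     # hgnetv2_b0 stage3
total_chs = in_chs + layer_num * mid_chs                  # 256 + 3 * 64 = 448 into aggregation
squeeze_chs = out_chs // 2                                # 'se' aggregation: 448 -> 256 -> 512 via two 1x1 convs
assert total_chs == 448 and squeeze_chs == 256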
292
+
293
+
294
+ class HighPerfGpuStage(nn.Module):
295
+ def __init__(
296
+ self,
297
+ in_chs,
298
+ mid_chs,
299
+ out_chs,
300
+ block_num,
301
+ layer_num,
302
+ downsample=True,
303
+ stride=2,
304
+ light_block=False,
305
+ kernel_size=3,
306
+ use_lab=False,
307
+ agg='ese',
308
+ drop_path=0.,
309
+ ):
310
+ super().__init__()
311
+ self.downsample = downsample
312
+ if downsample:
313
+ self.downsample = ConvBNAct(
314
+ in_chs,
315
+ in_chs,
316
+ kernel_size=3,
317
+ stride=stride,
318
+ groups=in_chs,
319
+ use_act=False,
320
+ use_lab=use_lab,
321
+ )
322
+ else:
323
+ self.downsample = nn.Identity()
324
+
325
+ blocks_list = []
326
+ for i in range(block_num):
327
+ blocks_list.append(
328
+ HighPerfGpuBlock(
329
+ in_chs if i == 0 else out_chs,
330
+ mid_chs,
331
+ out_chs,
332
+ layer_num,
333
+ residual=False if i == 0 else True,
334
+ kernel_size=kernel_size,
335
+ light_block=light_block,
336
+ use_lab=use_lab,
337
+ agg=agg,
338
+ drop_path=drop_path[i] if isinstance(drop_path, (list, tuple)) else drop_path,
339
+ )
340
+ )
341
+ self.blocks = nn.Sequential(*blocks_list)
342
+ self.grad_checkpointing = False
343
+
344
+ def forward(self, x):
345
+ x = self.downsample(x)
346
+ if self.grad_checkpointing and not torch.jit.is_scripting():
347
+ x = checkpoint_seq(self.blocks, x, flatten=False)
348
+ else:
349
+ x = self.blocks(x)
350
+ return x
351
+
352
+
353
+ class ClassifierHead(nn.Module):
354
+ def __init__(
355
+ self,
356
+ in_features: int,
357
+ num_classes: int,
358
+ pool_type: str = 'avg',
359
+ drop_rate: float = 0.,
360
+ hidden_size: Optional[int] = 2048,
361
+ use_lab: bool = False
362
+ ):
363
+ super(ClassifierHead, self).__init__()
364
+ self.num_features = in_features
365
+ if pool_type is not None:
366
+ if not pool_type:
367
+ assert num_classes == 0, 'Classifier head must be removed if pooling is disabled'
368
+
369
+ self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
370
+ if hidden_size is not None:
371
+ self.num_features = hidden_size
372
+ last_conv = nn.Conv2d(
373
+ in_features,
374
+ hidden_size,
375
+ kernel_size=1,
376
+ stride=1,
377
+ padding=0,
378
+ bias=False,
379
+ )
380
+ act = nn.ReLU()
381
+ if use_lab:
382
+ lab = LearnableAffineBlock()
383
+ self.last_conv = nn.Sequential(last_conv, act, lab)
384
+ else:
385
+ self.last_conv = nn.Sequential(last_conv, act)
386
+ else:
387
+ self.last_conv = nn.Identity()
388
+
389
+ self.dropout = nn.Dropout(drop_rate)
390
+ self.flatten = nn.Flatten(1) if pool_type else nn.Identity() # don't flatten if pooling disabled
391
+ self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
392
+
393
+ def reset(self, num_classes: int, pool_type: Optional[str] = None):
394
+ if pool_type is not None:
395
+ if not pool_type:
396
+ assert num_classes == 0, 'Classifier head must be removed if pooling is disabled'
397
+ self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
398
+ self.flatten = nn.Flatten(1) if pool_type else nn.Identity() # don't flatten if pooling disabled
399
+
400
+ self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
401
+
402
+ def forward(self, x, pre_logits: bool = False):
403
+ x = self.global_pool(x)
404
+ x = self.last_conv(x)
405
+ x = self.dropout(x)
406
+ x = self.flatten(x)
407
+ if pre_logits:
408
+ return x
409
+ x = self.fc(x)
410
+ return x
411
+
412
+
413
+ class HighPerfGpuNet(nn.Module):
414
+
415
+ def __init__(
416
+ self,
417
+ cfg: Dict,
418
+ in_chans: int = 3,
419
+ num_classes: int = 1000,
420
+ global_pool: str = 'avg',
421
+ head_hidden_size: Optional[int] = 2048,
422
+ drop_rate: float = 0.,
423
+ drop_path_rate: float = 0.,
424
+ use_lab: bool = False,
425
+ **kwargs,
426
+ ):
427
+ super(HighPerfGpuNet, self).__init__()
428
+ stem_type = cfg["stem_type"]
429
+ stem_chs = cfg["stem_chs"]
430
+ stages_cfg = [cfg["stage1"], cfg["stage2"], cfg["stage3"], cfg["stage4"]]
431
+ self.num_classes = num_classes
432
+ self.drop_rate = drop_rate
433
+ self.use_lab = use_lab
434
+
435
+ assert stem_type in ['v1', 'v2']
436
+ if stem_type == 'v2':
437
+ self.stem = StemV2(
438
+ in_chs=in_chans,
439
+ mid_chs=stem_chs[0],
440
+ out_chs=stem_chs[1],
441
+ use_lab=use_lab)
442
+ else:
443
+ self.stem = StemV1([in_chans] + stem_chs)
444
+
445
+ current_stride = 4
446
+
447
+ stages = []
448
+ self.feature_info = []
449
+ block_depths = [c[3] for c in stages_cfg]
450
+ dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(block_depths)).split(block_depths)]
451
+ for i, stage_config in enumerate(stages_cfg):
452
+ in_chs, mid_chs, out_chs, block_num, downsample, light_block, kernel_size, layer_num = stage_config
453
+ stages += [HighPerfGpuStage(
454
+ in_chs=in_chs,
455
+ mid_chs=mid_chs,
456
+ out_chs=out_chs,
457
+ block_num=block_num,
458
+ layer_num=layer_num,
459
+ downsample=downsample,
460
+ light_block=light_block,
461
+ kernel_size=kernel_size,
462
+ use_lab=use_lab,
463
+ agg='ese' if stem_type == 'v1' else 'se',
464
+ drop_path=dpr[i],
465
+ )]
466
+ self.num_features = out_chs
467
+ if downsample:
468
+ current_stride *= 2
469
+ self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')]
470
+ self.stages = nn.Sequential(*stages)
471
+
472
+ self.head = ClassifierHead(
473
+ self.num_features,
474
+ num_classes=num_classes,
475
+ pool_type=global_pool,
476
+ drop_rate=drop_rate,
477
+ hidden_size=head_hidden_size,
478
+ use_lab=use_lab
479
+ )
480
+ self.head_hidden_size = self.head.num_features
481
+
482
+ for n, m in self.named_modules():
483
+ if isinstance(m, nn.Conv2d):
484
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
485
+ elif isinstance(m, nn.BatchNorm2d):
486
+ nn.init.ones_(m.weight)
487
+ nn.init.zeros_(m.bias)
488
+ elif isinstance(m, nn.Linear):
489
+ nn.init.zeros_(m.bias)
490
+
491
+ @torch.jit.ignore
492
+ def group_matcher(self, coarse=False):
493
+ return dict(
494
+ stem=r'^stem',
495
+ blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.blocks\.(\d+)',
496
+ )
497
+
498
+ @torch.jit.ignore
499
+ def set_grad_checkpointing(self, enable=True):
500
+ for s in self.stages:
501
+ s.grad_checkpointing = enable
502
+
503
+ @torch.jit.ignore
504
+ def get_classifier(self) -> nn.Module:
505
+ return self.head.fc
506
+
507
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
508
+ self.num_classes = num_classes
509
+ self.head.reset(num_classes, global_pool)
510
+
511
+ def forward_features(self, x):
512
+ x = self.stem(x)
513
+ return self.stages(x)
514
+
515
+ def forward_head(self, x, pre_logits: bool = False):
516
+ return self.head(x, pre_logits=pre_logits)
517
+
518
+ def forward(self, x):
519
+ x = self.forward_features(x)
520
+ x = self.forward_head(x)
521
+ return x
522
+
523
+
524
+ model_cfgs = dict(
525
+ # PP-HGNet
526
+ hgnet_tiny={
527
+ "stem_type": 'v1',
528
+ "stem_chs": [48, 48, 96],
529
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
530
+ "stage1": [96, 96, 224, 1, False, False, 3, 5],
531
+ "stage2": [224, 128, 448, 1, True, False, 3, 5],
532
+ "stage3": [448, 160, 512, 2, True, False, 3, 5],
533
+ "stage4": [512, 192, 768, 1, True, False, 3, 5],
534
+ },
535
+ hgnet_small={
536
+ "stem_type": 'v1',
537
+ "stem_chs": [64, 64, 128],
538
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
539
+ "stage1": [128, 128, 256, 1, False, False, 3, 6],
540
+ "stage2": [256, 160, 512, 1, True, False, 3, 6],
541
+ "stage3": [512, 192, 768, 2, True, False, 3, 6],
542
+ "stage4": [768, 224, 1024, 1, True, False, 3, 6],
543
+ },
544
+ hgnet_base={
545
+ "stem_type": 'v1',
546
+ "stem_chs": [96, 96, 160],
547
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
548
+ "stage1": [160, 192, 320, 1, False, False, 3, 7],
549
+ "stage2": [320, 224, 640, 2, True, False, 3, 7],
550
+ "stage3": [640, 256, 960, 3, True, False, 3, 7],
551
+ "stage4": [960, 288, 1280, 2, True, False, 3, 7],
552
+ },
553
+ # PP-HGNetv2
554
+ hgnetv2_b0={
555
+ "stem_type": 'v2',
556
+ "stem_chs": [16, 16],
557
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
558
+ "stage1": [16, 16, 64, 1, False, False, 3, 3],
559
+ "stage2": [64, 32, 256, 1, True, False, 3, 3],
560
+ "stage3": [256, 64, 512, 2, True, True, 5, 3],
561
+ "stage4": [512, 128, 1024, 1, True, True, 5, 3],
562
+ },
563
+ hgnetv2_b1={
564
+ "stem_type": 'v2',
565
+ "stem_chs": [24, 32],
566
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
567
+ "stage1": [32, 32, 64, 1, False, False, 3, 3],
568
+ "stage2": [64, 48, 256, 1, True, False, 3, 3],
569
+ "stage3": [256, 96, 512, 2, True, True, 5, 3],
570
+ "stage4": [512, 192, 1024, 1, True, True, 5, 3],
571
+ },
572
+ hgnetv2_b2={
573
+ "stem_type": 'v2',
574
+ "stem_chs": [24, 32],
575
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
576
+ "stage1": [32, 32, 96, 1, False, False, 3, 4],
577
+ "stage2": [96, 64, 384, 1, True, False, 3, 4],
578
+ "stage3": [384, 128, 768, 3, True, True, 5, 4],
579
+ "stage4": [768, 256, 1536, 1, True, True, 5, 4],
580
+ },
581
+ hgnetv2_b3={
582
+ "stem_type": 'v2',
583
+ "stem_chs": [24, 32],
584
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
585
+ "stage1": [32, 32, 128, 1, False, False, 3, 5],
586
+ "stage2": [128, 64, 512, 1, True, False, 3, 5],
587
+ "stage3": [512, 128, 1024, 3, True, True, 5, 5],
588
+ "stage4": [1024, 256, 2048, 1, True, True, 5, 5],
589
+ },
590
+ hgnetv2_b4={
591
+ "stem_type": 'v2',
592
+ "stem_chs": [32, 48],
593
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
594
+ "stage1": [48, 48, 128, 1, False, False, 3, 6],
595
+ "stage2": [128, 96, 512, 1, True, False, 3, 6],
596
+ "stage3": [512, 192, 1024, 3, True, True, 5, 6],
597
+ "stage4": [1024, 384, 2048, 1, True, True, 5, 6],
598
+ },
599
+ hgnetv2_b5={
600
+ "stem_type": 'v2',
601
+ "stem_chs": [32, 64],
602
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
603
+ "stage1": [64, 64, 128, 1, False, False, 3, 6],
604
+ "stage2": [128, 128, 512, 2, True, False, 3, 6],
605
+ "stage3": [512, 256, 1024, 5, True, True, 5, 6],
606
+ "stage4": [1024, 512, 2048, 2, True, True, 5, 6],
607
+ },
608
+ hgnetv2_b6={
609
+ "stem_type": 'v2',
610
+ "stem_chs": [48, 96],
611
+ # in_chs, mid_chs, out_chs, blocks, downsample, light_block, kernel_size, layer_num
612
+ "stage1": [96, 96, 192, 2, False, False, 3, 6],
613
+ "stage2": [192, 192, 512, 3, True, False, 3, 6],
614
+ "stage3": [512, 384, 1024, 6, True, True, 5, 6],
615
+ "stage4": [1024, 768, 2048, 3, True, True, 5, 6],
616
+ },
617
+ )
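To make the per-stage tuples concrete, here is how one row of the table above expands in the constructor loop further up (field order follows the inline comment):

cfg = model_cfgs['hgnetv2_b0']
in_chs, mid_chs, out_chs, block_num, downsample, light_block, kernel_size, layer_num = cfg['stage3']
# -> HighPerfGpuStage(in_chs=256, mid_chs=64, out_chs=512, block_num=2, layer_num=3,
#                     downsample=True, light_block=True, kernel_size=5, agg='se', ...)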
618
+
619
+
620
+ def _create_hgnet(variant, pretrained=False, **kwargs):
621
+ out_indices = kwargs.pop('out_indices', (0, 1, 2, 3))
622
+ return build_model_with_cfg(
623
+ HighPerfGpuNet,
624
+ variant,
625
+ pretrained,
626
+ model_cfg=model_cfgs[variant],
627
+ feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
628
+ **kwargs,
629
+ )
630
+
631
+
632
+ def _cfg(url='', **kwargs):
633
+ return {
634
+ 'url': url,
635
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
636
+ 'crop_pct': 0.965, 'interpolation': 'bicubic',
637
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
638
+ 'classifier': 'head.fc', 'first_conv': 'stem.stem1.conv',
639
+ 'test_crop_pct': 1.0, 'test_input_size': (3, 288, 288),
640
+ **kwargs,
641
+ }
642
+
643
+
644
+ default_cfgs = generate_default_cfgs({
645
+ 'hgnet_tiny.paddle_in1k': _cfg(
646
+ first_conv='stem.stem.0.conv',
647
+ hf_hub_id='timm/'),
648
+ 'hgnet_tiny.ssld_in1k': _cfg(
649
+ first_conv='stem.stem.0.conv',
650
+ hf_hub_id='timm/'),
651
+ 'hgnet_small.paddle_in1k': _cfg(
652
+ first_conv='stem.stem.0.conv',
653
+ hf_hub_id='timm/'),
654
+ 'hgnet_small.ssld_in1k': _cfg(
655
+ first_conv='stem.stem.0.conv',
656
+ hf_hub_id='timm/'),
657
+ 'hgnet_base.ssld_in1k': _cfg(
658
+ first_conv='stem.stem.0.conv',
659
+ hf_hub_id='timm/'),
660
+ 'hgnetv2_b0.ssld_stage2_ft_in1k': _cfg(
661
+ hf_hub_id='timm/'),
662
+ 'hgnetv2_b0.ssld_stage1_in22k_in1k': _cfg(
663
+ hf_hub_id='timm/'),
664
+ 'hgnetv2_b1.ssld_stage2_ft_in1k': _cfg(
665
+ hf_hub_id='timm/'),
666
+ 'hgnetv2_b1.ssld_stage1_in22k_in1k': _cfg(
667
+ hf_hub_id='timm/'),
668
+ 'hgnetv2_b2.ssld_stage2_ft_in1k': _cfg(
669
+ hf_hub_id='timm/'),
670
+ 'hgnetv2_b2.ssld_stage1_in22k_in1k': _cfg(
671
+ hf_hub_id='timm/'),
672
+ 'hgnetv2_b3.ssld_stage2_ft_in1k': _cfg(
673
+ hf_hub_id='timm/'),
674
+ 'hgnetv2_b3.ssld_stage1_in22k_in1k': _cfg(
675
+ hf_hub_id='timm/'),
676
+ 'hgnetv2_b4.ssld_stage2_ft_in1k': _cfg(
677
+ hf_hub_id='timm/'),
678
+ 'hgnetv2_b4.ssld_stage1_in22k_in1k': _cfg(
679
+ hf_hub_id='timm/'),
680
+ 'hgnetv2_b5.ssld_stage2_ft_in1k': _cfg(
681
+ hf_hub_id='timm/'),
682
+ 'hgnetv2_b5.ssld_stage1_in22k_in1k': _cfg(
683
+ hf_hub_id='timm/'),
684
+ 'hgnetv2_b6.ssld_stage2_ft_in1k': _cfg(
685
+ hf_hub_id='timm/'),
686
+ 'hgnetv2_b6.ssld_stage1_in22k_in1k': _cfg(
687
+ hf_hub_id='timm/'),
688
+ })
689
+
690
+
691
+ @register_model
692
+ def hgnet_tiny(pretrained=False, **kwargs) -> HighPerfGpuNet:
693
+ return _create_hgnet('hgnet_tiny', pretrained=pretrained, **kwargs)
694
+
695
+
696
+ @register_model
697
+ def hgnet_small(pretrained=False, **kwargs) -> HighPerfGpuNet:
698
+ return _create_hgnet('hgnet_small', pretrained=pretrained, **kwargs)
699
+
700
+
701
+ @register_model
702
+ def hgnet_base(pretrained=False, **kwargs) -> HighPerfGpuNet:
703
+ return _create_hgnet('hgnet_base', pretrained=pretrained, **kwargs)
704
+
705
+
706
+ @register_model
707
+ def hgnetv2_b0(pretrained=False, **kwargs) -> HighPerfGpuNet:
708
+ return _create_hgnet('hgnetv2_b0', pretrained=pretrained, use_lab=True, **kwargs)
709
+
710
+
711
+ @register_model
712
+ def hgnetv2_b1(pretrained=False, **kwargs) -> HighPerfGpuNet:
713
+ return _create_hgnet('hgnetv2_b1', pretrained=pretrained, use_lab=True, **kwargs)
714
+
715
+
716
+ @register_model
717
+ def hgnetv2_b2(pretrained=False, **kwargs) -> HighPerfGpuNet:
718
+ return _create_hgnet('hgnetv2_b2', pretrained=pretrained, use_lab=True, **kwargs)
719
+
720
+
721
+ @register_model
722
+ def hgnetv2_b3(pretrained=False, **kwargs) -> HighPerfGpuNet:
723
+ return _create_hgnet('hgnetv2_b3', pretrained=pretrained, use_lab=True, **kwargs)
724
+
725
+
726
+ @register_model
727
+ def hgnetv2_b4(pretrained=False, **kwargs) -> HighPerfGpuNet:
728
+ return _create_hgnet('hgnetv2_b4', pretrained=pretrained, **kwargs)
729
+
730
+
731
+ @register_model
732
+ def hgnetv2_b5(pretrained=False, **kwargs) -> HighPerfGpuNet:
733
+ return _create_hgnet('hgnetv2_b5', pretrained=pretrained, **kwargs)
734
+
735
+
736
+ @register_model
737
+ def hgnetv2_b6(pretrained=False, **kwargs) -> HighPerfGpuNet:
738
+ return _create_hgnet('hgnetv2_b6', pretrained=pretrained, **kwargs)
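For reference, a minimal usage sketch for the hgnet entrypoints registered above (assuming a timm install that includes this file; the `features_only` behaviour follows from the `feature_cfg` set in `_create_hgnet`, and no pretrained weights are required):

```python
# Minimal usage sketch (assumes these hgnet entrypoints are registered in timm).
import torch
import timm

model = timm.create_model('hgnetv2_b0', pretrained=False)
x = torch.randn(1, 3, 224, 224)
logits = model(x)                      # [1, 1000]

# Feature-pyramid usage via the feature_cfg set in _create_hgnet (out_indices 0..3)
backbone = timm.create_model('hgnetv2_b0', pretrained=False, features_only=True)
feats = backbone(x)                    # list of 4 NCHW feature maps
print([f.shape for f in feats])
```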
pytorch-image-models/timm/models/hiera.py ADDED
@@ -0,0 +1,996 @@
1
+ """ An PyTorch implementation of Hiera
2
+
3
+ Adapted for timm from originals at https://github.com/facebookresearch/hiera
4
+ """
5
+
6
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
7
+ # All rights reserved.
8
+
9
+ # This source code is licensed under the license found in the
10
+ # LICENSE file in the root directory of this source tree.
11
+ # --------------------------------------------------------
12
+ #
13
+ # Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles
14
+ #
15
+ # Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan,
16
+ # Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed,
17
+ # Judy Hoffman, Jitendra Malik, Yanghao Li, Christoph Feichtenhofer.
18
+ #
19
+ # Paper: https://arxiv.org/abs/2306.00989/
20
+ #
21
+ # References:
22
+ # slowfast: https://github.com/facebookresearch/SlowFast
23
+ # timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
24
+ # --------------------------------------------------------
25
+ import math
26
+ from functools import partial
27
+ from typing import Callable, Dict, List, Optional, Tuple, Type, Union
28
+
29
+ import torch
30
+ import torch.nn as nn
31
+ import torch.nn.functional as F
32
+ from torch.utils.checkpoint import checkpoint
33
+
34
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
35
+ from timm.layers import DropPath, Mlp, LayerScale, ClNormMlpClassifierHead, use_fused_attn, \
36
+ _assert, get_norm_layer, to_2tuple, init_weight_vit, init_weight_jax
37
+
38
+ from ._registry import generate_default_cfgs, register_model
39
+ from ._builder import build_model_with_cfg
40
+ from ._features import feature_take_indices
41
+ from ._features_fx import register_notrace_function
42
+ from ._manipulate import named_apply
43
+
44
+
45
+ __all__ = ['Hiera']
46
+
47
+
48
+ def conv_nd(n: int) -> Type[nn.Module]:
49
+ """
50
+ Returns an nd conv module (e.g., Conv2d for n=2). Works up to n=3.
51
+ If you wanted a 4d Hiera, you could probably just implement this for n=4. (no promises)
52
+ """
53
+ return [nn.Identity, nn.Conv1d, nn.Conv2d, nn.Conv3d][n]
54
+
55
+
56
+ @register_notrace_function
57
+ def get_resized_mask(target_size: List[int], mask: torch.Tensor) -> torch.Tensor:
58
+ # target_size: [(T), (H), W]
59
+ # (spatial) mask: [B, C, (t), (h), w]
60
+ if mask is None:
61
+ return mask
62
+
63
+ _assert(len(mask.shape[2:]) == len(target_size), "mask spatial shape and target_size must match.")
64
+ if mask.shape[2:] != target_size:
65
+ return F.interpolate(mask.float(), size=target_size)
66
+ return mask
67
+
68
+
69
+ def undo_windowing(
70
+ x: torch.Tensor,
71
+ shape: List[int],
72
+ mu_shape: List[int],
73
+ ) -> torch.Tensor:
74
+ """
75
+ Restore spatial organization by undoing windowed organization of mask units.
76
+
77
+ Args:
78
+ x: organized by mask units windows, e.g. in 2d [B, #MUy*#MUx, MUy, MUx, C]
79
+ shape: current spatial shape, if it were not organized into mask unit
80
+ windows, e.g. in 2d [B, #MUy*MUy, #MUx*MUx, C].
81
+ mu_shape: current mask unit shape, e.g. in 2d [MUy, MUx]
82
+ Returns:
83
+ x: e.g. in 2d, [B, #MUy*MUy, #MUx*MUx, C]
84
+ """
85
+ D = len(shape)
86
+ B, C = x.shape[0], x.shape[-1]
87
+ # [B, #MUy*#MUx, MUy, MUx, C] -> [B, #MUy, #MUx, MUy, MUx, C]
88
+ num_MUs = [s // mu for s, mu in zip(shape, mu_shape)]
89
+ x = x.view(B, *num_MUs, *mu_shape, C)
90
+
91
+ # [B, #MUy, #MUx, MUy, MUx, C] -> [B, #MUy*MUy, #MUx*MUx, C]
92
+ permute = (
93
+ [0]
94
+ + sum([list(p) for p in zip(range(1, 1 + D), range(1 + D, 1 + 2 * D))], [])
95
+ + [len(x.shape) - 1]
96
+ )
97
+ x = x.permute(permute).reshape(B, *shape, C)
98
+
99
+ return x
100
+
101
+
102
+ class Unroll(nn.Module):
103
+ """
104
+ Reorders the tokens such that patches are contiguous in memory.
105
+ E.g., given [B, (H, W), C] and stride of (Sy, Sx), this will re-order the tokens as
106
+ [B, (Sy, Sx, H // Sy, W // Sx), C]
107
+
108
+ This allows operations like Max2d to be computed as x.view(B, Sx*Sy, -1, C).max(dim=1).
109
+ Not only is this faster, but it also makes it easy to support inputs of arbitrary
110
+ dimensions in addition to patch-wise sparsity.
111
+
112
+ Performing this operation multiple times in sequence puts entire windows as contiguous
113
+ in memory. For instance, if you applied the stride (2, 2) 3 times, entire windows of
114
+ size 8x8 would be contiguous in memory, allowing operations like mask unit attention
115
+ to be computed easily and efficiently, while also allowing max to be applied sequentially.
116
+
117
+ Note: This means that intermediate values of the model are not in HxW order, so they
118
+ need to be re-rolled if you want to use the intermediate values as a HxW feature map.
119
+ The last block of the network is fine though, since by then the strides are all consumed.
120
+ """
121
+
122
+ def __init__(
123
+ self,
124
+ input_size: Tuple[int, ...],
125
+ patch_stride: Tuple[int, ...],
126
+ unroll_schedule: List[Tuple[int, ...]],
127
+ ):
128
+ super().__init__()
129
+ self.size = [i // s for i, s in zip(input_size, patch_stride)]
130
+ self.schedule = unroll_schedule
131
+
132
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
133
+ """
134
+ Input: Flattened patch embeddings [B, N, C]
135
+ Output: Patch embeddings [B, N, C] permuted such that [B, 4, N//4, C].max(1) etc. performs MaxPoolNd
136
+ """
137
+ B, _, C = x.shape
138
+ cur_size = self.size
139
+ x = x.view(*([B] + cur_size + [C]))
140
+
141
+ for strides in self.schedule:
142
+ # Move patches with the given strides to the batch dimension
143
+
144
+ # Create a view of the tensor with the patch stride as separate dims
145
+ # For example in 2d: [B, H // Sy, Sy, W // Sx, Sx, C]
146
+ cur_size = [i // s for i, s in zip(cur_size, strides)]
147
+ new_shape = [B] + sum([[i, s] for i, s in zip(cur_size, strides)], []) + [C]
148
+ x = x.view(new_shape)
149
+
150
+ # Move the patch stride into the batch dimension
151
+ # For example in 2d: [B, Sy, Sx, H // Sy, W // Sx, C]
152
+ L = len(new_shape)
153
+ permute = [0] + list(range(2, L - 1, 2)) + list(range(1, L - 1, 2)) + [L - 1]
154
+ x = x.permute(permute)
155
+
156
+ # Now finally flatten the relevant dims into the batch dimension
157
+ x = x.flatten(0, len(strides))
158
+ B *= math.prod(strides)
159
+
160
+ x = x.reshape(-1, math.prod(self.size), C)
161
+ return x
162
+
163
+
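As a sanity check on the Unroll docstring, the following sketch (not part of the diff; it assumes `Unroll` is importable from `timm.models.hiera`) verifies that after unrolling with stride (2, 2), a view plus a max over dim 1 reproduces 2x2 max pooling in raster order:

```python
# Sketch: Unroll with stride (2, 2) makes 2x2 max pooling a view + max over dim 1.
import torch
import torch.nn.functional as F
from timm.models.hiera import Unroll   # module path assumed

B, H, W, C = 2, 8, 8, 16
x = torch.randn(B, H * W, C)           # flattened [B, N, C] tokens, raster order
unroll = Unroll(input_size=(H, W), patch_stride=(1, 1), unroll_schedule=[(2, 2)])
x_u = unroll(x)                        # still [B, N, C], tokens reordered

pooled_u = x_u.view(B, 4, -1, C).max(dim=1).values        # [B, N // 4, C]
pooled_ref = F.max_pool2d(x.view(B, H, W, C).permute(0, 3, 1, 2), kernel_size=2)
assert torch.allclose(pooled_u, pooled_ref.flatten(2).transpose(1, 2), atol=1e-6)
```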
164
+ class Reroll(nn.Module):
165
+ """
166
+ Undoes the "unroll" operation so that you can use intermediate features.
167
+ """
168
+
169
+ def __init__(
170
+ self,
171
+ input_size: Tuple[int, ...],
172
+ patch_stride: Tuple[int, ...],
173
+ unroll_schedule: List[Tuple[int, ...]],
174
+ stage_ends: List[int],
175
+ q_pool: int,
176
+ ):
177
+ super().__init__()
178
+ self.size = [i // s for i, s in zip(input_size, patch_stride)]
179
+
180
+ # The first stage has to reverse everything
181
+ # The next stage has to reverse all but the first unroll, etc.
182
+ self.schedule = {}
183
+ size = self.size
184
+ for i in range(stage_ends[-1] + 1):
185
+ self.schedule[i] = unroll_schedule, size
186
+ # schedule unchanged if no pooling at a stage end
187
+ if i in stage_ends[:q_pool]:
188
+ if len(unroll_schedule) > 0:
189
+ size = [n // s for n, s in zip(size, unroll_schedule[0])]
190
+ unroll_schedule = unroll_schedule[1:]
191
+
192
+ def forward(
193
+ self,
194
+ x: torch.Tensor,
195
+ block_idx: int,
196
+ mask: torch.Tensor = None
197
+ ) -> torch.Tensor:
198
+ """
199
+ Roll the given tensor back up to spatial order assuming it's from the given block.
200
+
201
+ If no mask is provided:
202
+ - Returns [B, H, W, C] for 2d, [B, T, H, W, C] for 3d, etc.
203
+ If a mask is provided:
204
+ - Returns [B, #MUs, MUy, MUx, C] for 2d, etc.
205
+ """
206
+ schedule, size = self.schedule[block_idx]
207
+ B, N, C = x.shape
208
+
209
+ D = len(size)
210
+ cur_mu_shape = [1] * D
211
+
212
+ for strides in schedule:
213
+ # Extract the current patch from N
214
+ x = x.view(B, *strides, N // math.prod(strides), *cur_mu_shape, C)
215
+
216
+ # Move that patch into the current MU
217
+ # Example in 2d: [B, Sy, Sx, N//(Sy*Sx), MUy, MUx, C] -> [B, N//(Sy*Sx), Sy, MUy, Sx, MUx, C]
218
+ L = len(x.shape)
219
+ permute = (
220
+ [0, 1 + D]
221
+ + sum([list(p) for p in zip(range(1, 1 + D), range(1 + D + 1, L - 1))], [])
222
+ + [L - 1]
223
+ )
224
+ x = x.permute(permute)
225
+
226
+ # Reshape to [B, N//(Sy*Sx), *MU, C]
227
+ for i in range(D):
228
+ cur_mu_shape[i] *= strides[i]
229
+ x = x.reshape(B, -1, *cur_mu_shape, C)
230
+ N = x.shape[1]
231
+
232
+ # Current shape (e.g., 2d: [B, #MUy*#MUx, MUy, MUx, C])
233
+ x = x.view(B, N, *cur_mu_shape, C)
234
+
235
+ # If masked, return [B, #MUs, MUy, MUx, C]
236
+ if mask is not None:
237
+ return x
238
+
239
+ # If not masked, we can return [B, H, W, C]
240
+ x = undo_windowing(x, size, cur_mu_shape)
241
+
242
+ return x
243
+
244
+
245
+ class MaskUnitAttention(nn.Module):
246
+ """
247
+ Computes either Mask Unit or Global Attention. It can also perform q pooling.
248
+
249
+ Note: this assumes the tokens have already been flattened and unrolled into mask units.
250
+ See `Unroll` for more details.
251
+ """
252
+ fused_attn: torch.jit.Final[bool]
253
+
254
+ def __init__(
255
+ self,
256
+ dim: int,
257
+ dim_out: int,
258
+ heads: int,
259
+ q_stride: int = 1,
260
+ window_size: int = 0,
261
+ use_mask_unit_attn: bool = False,
262
+ ):
263
+ """
264
+ Args:
265
+ - dim, dim_out: The input and output feature dimensions.
266
+ - heads: The number of attention heads.
267
+ - q_stride: If greater than 1, pool q with this stride. The stride should be flattened (e.g., 2x2 = 4).
268
+ - window_size: The current (flattened) size of a mask unit *after* pooling (if any).
269
+ - use_mask_unit_attn: Use Mask Unit or Global Attention.
270
+ """
271
+ super().__init__()
272
+
273
+ self.dim = dim
274
+ self.dim_out = dim_out
275
+ self.heads = heads
276
+ self.q_stride = q_stride
277
+ self.head_dim = dim_out // heads
278
+ self.scale = self.head_dim ** -0.5
279
+ self.fused_attn = use_fused_attn()
280
+
281
+ self.qkv = nn.Linear(dim, 3 * dim_out)
282
+ self.proj = nn.Linear(dim_out, dim_out)
283
+
284
+ self.window_size = window_size
285
+ self.use_mask_unit_attn = use_mask_unit_attn
286
+
287
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
288
+ """ Input should be of shape [batch, tokens, channels]. """
289
+ B, N, _ = x.shape
290
+ num_windows = (N // (self.q_stride * self.window_size)) if self.use_mask_unit_attn else 1
291
+ qkv = self.qkv(x).reshape(B, -1, num_windows, 3, self.heads, self.head_dim).permute(3, 0, 4, 2, 1, 5)
292
+ q, k, v = qkv.unbind(0)
293
+
294
+ if self.q_stride > 1:
295
+ # Refer to Unroll to see how this performs a maxpool-Nd
296
+ q = q.view(B, self.heads, num_windows, self.q_stride, -1, self.head_dim).amax(dim=3)
297
+
298
+ if self.fused_attn:
299
+ # Note: the original paper did *not* use SDPA, it's a free boost!
300
+ x = F.scaled_dot_product_attention(q, k, v)
301
+ else:
302
+ attn = (q * self.scale) @ k.transpose(-1, -2)
303
+ attn = attn.softmax(dim=-1)
304
+ x = attn @ v
305
+
306
+ x = x.transpose(1, 3).reshape(B, -1, self.dim_out)
307
+ x = self.proj(x)
308
+ return x
309
+
310
+
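For intuition about the shapes, a hedged sketch (illustrative numbers; `MaskUnitAttention` assumed importable from `timm.models.hiera`) showing how `q_stride` pooling inside the attention reduces the token count by the pooled factor:

```python
# Shape sketch: q_stride pooling inside MaskUnitAttention shrinks tokens 4x.
import torch
from timm.models.hiera import MaskUnitAttention   # module path assumed

# window_size is the flattened mask-unit size *after* pooling, q_stride the flattened pool
attn = MaskUnitAttention(dim=96, dim_out=96, heads=2, q_stride=4, window_size=16,
                         use_mask_unit_attn=True)
x = torch.randn(2, 8 * 4 * 16, 96)     # 8 mask units of 64 tokens each (pre-pool)
y = attn(x)
print(y.shape)                         # torch.Size([2, 128, 96]) -> 512 tokens pooled to 128
```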
311
+ class HieraBlock(nn.Module):
312
+ def __init__(
313
+ self,
314
+ dim: int,
315
+ dim_out: int,
316
+ heads: int,
317
+ mlp_ratio: float = 4.0,
318
+ drop_path: float = 0.0,
319
+ init_values: Optional[float] = None,
320
+ norm_layer: nn.Module = nn.LayerNorm,
321
+ act_layer: nn.Module = nn.GELU,
322
+ q_stride: int = 1,
323
+ window_size: int = 0,
324
+ use_expand_proj: bool = True,
325
+ use_mask_unit_attn: bool = False,
326
+ ):
327
+ super().__init__()
328
+ self.dim = dim
329
+ self.dim_out = dim_out
330
+
331
+ self.norm1 = norm_layer(dim)
332
+ if dim != dim_out:
333
+ self.do_expand = True
334
+ if use_expand_proj:
335
+ self.proj = nn.Linear(dim, dim_out)
336
+ else:
337
+ assert dim_out == dim * 2
338
+ self.proj = None
339
+ else:
340
+ self.do_expand = False
341
+ self.proj = None
342
+ self.attn = MaskUnitAttention(
343
+ dim,
344
+ dim_out,
345
+ heads,
346
+ q_stride,
347
+ window_size,
348
+ use_mask_unit_attn
349
+ )
350
+ self.ls1 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity()
351
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0 else nn.Identity()
352
+
353
+ self.norm2 = norm_layer(dim_out)
354
+ self.mlp = Mlp(dim_out, int(dim_out * mlp_ratio), act_layer=act_layer)
355
+ self.ls2 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity()
356
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0 else nn.Identity()
357
+
358
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
359
+ # Attention + Q Pooling
360
+ x_norm = self.norm1(x)
361
+ if self.do_expand:
362
+ if self.proj is not None:
363
+ x = self.proj(x_norm)
364
+ x = x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1) # max-pool
365
+ else:
366
+ x = torch.cat([
367
+ x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1), # max-pool
368
+ x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).mean(dim=1), # avg-pool
369
+ ],
370
+ dim=-1,
371
+ )
372
+ x = x + self.drop_path1(self.ls1(self.attn(x_norm)))
373
+
374
+ # MLP
375
+ x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
376
+ return x
377
+
378
+
379
+ class PatchEmbed(nn.Module):
380
+ """Patch embed that supports any number of spatial dimensions (1d, 2d, 3d)."""
381
+
382
+ def __init__(
383
+ self,
384
+ dim_in: int,
385
+ dim_out: int,
386
+ kernel: Tuple[int, ...],
387
+ stride: Tuple[int, ...],
388
+ padding: Tuple[int, ...],
389
+ reshape: bool = True,
390
+ ):
391
+ super().__init__()
392
+
393
+ # Support any number of spatial dimensions
394
+ self.spatial_dims = len(kernel)
395
+ self.reshape = reshape
396
+ self.proj = conv_nd(self.spatial_dims)(
397
+ dim_in,
398
+ dim_out,
399
+ kernel_size=kernel,
400
+ stride=stride,
401
+ padding=padding,
402
+ )
403
+
404
+ def forward(
405
+ self,
406
+ x: torch.Tensor,
407
+ mask: Optional[torch.Tensor] = None,
408
+ ) -> torch.Tensor:
409
+ if mask is not None:
410
+ mask = get_resized_mask(target_size=x.shape[2:], mask=mask)
411
+ x = self.proj(x * mask.to(torch.bool))
412
+ else:
413
+ x = self.proj(x)
414
+ if self.reshape:
415
+ x = x.reshape(x.shape[0], x.shape[1], -1).transpose(2, 1)
416
+ return x
417
+
418
+
419
+ class Hiera(nn.Module):
420
+
421
+ def __init__(
422
+ self,
423
+ img_size: Tuple[int, ...] = (224, 224),
424
+ in_chans: int = 3,
425
+ embed_dim: int = 96, # initial embed dim
426
+ num_heads: int = 1, # initial number of heads
427
+ num_classes: int = 1000,
428
+ global_pool: str = 'avg',
429
+ stages: Tuple[int, ...] = (2, 3, 16, 3),
430
+ q_pool: int = 3, # number of q_pool stages
431
+ q_stride: Tuple[int, ...] = (2, 2),
432
+ mask_unit_size: Tuple[int, ...] = (8, 8), # must divide q_stride ** (#stages-1)
433
+ # mask_unit_attn: which stages use mask unit attention?
434
+ mask_unit_attn: Tuple[bool, ...] = (True, True, False, False),
435
+ use_expand_proj: bool = True,
436
+ dim_mul: float = 2.0,
437
+ head_mul: float = 2.0,
438
+ patch_kernel: Tuple[int, ...] = (7, 7),
439
+ patch_stride: Tuple[int, ...] = (4, 4),
440
+ patch_padding: Tuple[int, ...] = (3, 3),
441
+ mlp_ratio: float = 4.0,
442
+ drop_path_rate: float = 0.0,
443
+ init_values: Optional[float] = None,
444
+ fix_init: bool = True,
445
+ weight_init: str = '',
446
+ norm_layer: Union[str, nn.Module] = "LayerNorm",
447
+ drop_rate: float = 0.0,
448
+ patch_drop_rate: float = 0.0,
449
+ head_init_scale: float = 0.001,
450
+ sep_pos_embed: bool = False,
451
+ abs_win_pos_embed: bool = False,
452
+ global_pos_size: Tuple[int, int] = (14, 14),
453
+ ):
454
+ super().__init__()
455
+ self.num_classes = num_classes
456
+ self.grad_checkpointing = False
457
+ norm_layer = get_norm_layer(norm_layer)
458
+ if isinstance(img_size, int):
459
+ img_size = to_2tuple(img_size)
460
+
461
+ self.patch_stride = patch_stride
462
+ self.tokens_spatial_shape = [i // s for i, s in zip(img_size, patch_stride)]
463
+ num_tokens = math.prod(self.tokens_spatial_shape)
464
+ flat_mu_size = math.prod(mask_unit_size)
465
+ flat_q_stride = math.prod(q_stride)
466
+ assert q_pool < len(stages)
467
+ self.q_pool, self.q_stride = q_pool, q_stride
468
+ self.mu_size, self.mask_unit_size = flat_mu_size, mask_unit_size
469
+ self.mask_spatial_shape = [i // s for i, s in zip(self.tokens_spatial_shape, self.mask_unit_size)]
470
+ self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
471
+ self.patch_drop_rate = patch_drop_rate
472
+
473
+ self.patch_embed = PatchEmbed(
474
+ in_chans,
475
+ embed_dim,
476
+ patch_kernel,
477
+ patch_stride,
478
+ patch_padding,
479
+ )
480
+
481
+ self.pos_embed: Optional[nn.Parameter] = None
482
+ self.pos_embed_win: Optional[nn.Parameter] = None
483
+ self.pos_embed_spatial: Optional[nn.Parameter] = None
484
+ self.pos_embed_temporal: Optional[nn.Parameter] = None
485
+ if sep_pos_embed:
486
+ self.pos_embed_spatial = nn.Parameter(
487
+ torch.zeros(1, self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2], embed_dim)
488
+ )
489
+ self.pos_embed_temporal = nn.Parameter(
490
+ torch.zeros(1, self.tokens_spatial_shape[0], embed_dim)
491
+ )
492
+ else:
493
+ if abs_win_pos_embed:
494
+ # absolute win, params NCHW to make tile & interpolate more natural before add & reshape
495
+ self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *global_pos_size))
496
+ self.pos_embed_win = nn.Parameter(torch.zeros(1, embed_dim, *mask_unit_size))
497
+ else:
498
+ self.pos_embed = nn.Parameter(torch.zeros(1, num_tokens, embed_dim))
499
+
500
+ # Setup roll and reroll modules
501
+ self.unroll = Unroll(
502
+ img_size,
503
+ patch_stride,
504
+ [q_stride] * len(self.stage_ends[:-1])
505
+ )
506
+ self.reroll = Reroll(
507
+ img_size,
508
+ patch_stride,
509
+ [q_stride] * len(self.stage_ends[:-1]),
510
+ self.stage_ends,
511
+ q_pool,
512
+ )
513
+ # q_pool locations
514
+ q_pool_blocks = [x + 1 for x in self.stage_ends[:q_pool]]
515
+
516
+ # Transformer blocks
517
+ cur_stage = 0
518
+ depth = sum(stages)
519
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
520
+ self.blocks = nn.ModuleList()
521
+ self.feature_info = []
522
+ for i in range(depth):
523
+ dim_out = embed_dim
524
+ # Mask unit or global attention.
525
+ # Lagged by 1 block, so that global attention is
526
+ # applied post-pooling, on the lower resolution
527
+ use_mask_unit_attn = mask_unit_attn[cur_stage]
528
+
529
+ if i - 1 in self.stage_ends:
530
+ dim_out = int(embed_dim * dim_mul)
531
+ num_heads = int(num_heads * head_mul)
532
+ cur_stage += 1
533
+ if i in q_pool_blocks:
534
+ flat_mu_size //= flat_q_stride
535
+
536
+ block = HieraBlock(
537
+ dim=embed_dim,
538
+ dim_out=dim_out,
539
+ heads=num_heads,
540
+ mlp_ratio=mlp_ratio,
541
+ drop_path=dpr[i],
542
+ init_values=init_values,
543
+ norm_layer=norm_layer,
544
+ q_stride=(flat_q_stride if i in q_pool_blocks else 1),
545
+ window_size=flat_mu_size,
546
+ use_expand_proj=use_expand_proj,
547
+ use_mask_unit_attn=use_mask_unit_attn,
548
+ )
549
+ embed_dim = dim_out
550
+ if i in self.stage_ends:
551
+ self.feature_info += [
552
+ dict(num_chs=dim_out, reduction=2**(cur_stage+2), module=f'blocks.{self.stage_ends[cur_stage]}')]
553
+ self.blocks.append(block)
554
+
555
+ self.num_features = self.head_hidden_size = embed_dim
556
+ self.head = ClNormMlpClassifierHead(
557
+ embed_dim,
558
+ num_classes,
559
+ pool_type=global_pool,
560
+ drop_rate=drop_rate,
561
+ norm_layer=norm_layer,
562
+ input_fmt='NLC',
563
+ )
564
+
565
+ # Initialize everything
566
+ if sep_pos_embed:
567
+ nn.init.trunc_normal_(self.pos_embed_spatial, std=0.02)
568
+ nn.init.trunc_normal_(self.pos_embed_temporal, std=0.02)
569
+ else:
570
+ if self.pos_embed is not None:
571
+ nn.init.trunc_normal_(self.pos_embed, std=0.02)
572
+ if self.pos_embed_win is not None:
573
+ nn.init.trunc_normal_(self.pos_embed_win, std=0.02)
574
+
575
+ if weight_init != 'skip':
576
+ init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit
577
+ init_fn = partial(init_fn, classifier_name='head.fc')
578
+ named_apply(init_fn, self)
579
+ if fix_init:
580
+ self.fix_init_weight()
581
+ if isinstance(self.head.fc, nn.Linear):
582
+ self.head.fc.weight.data.mul_(head_init_scale)
583
+ self.head.fc.bias.data.mul_(head_init_scale)
584
+
585
+ def fix_init_weight(self):
586
+ def rescale(param, _layer_id):
587
+ param.div_(math.sqrt(2.0 * _layer_id))
588
+
589
+ for layer_id, layer in enumerate(self.blocks):
590
+ rescale(layer.attn.proj.weight.data, layer_id + 1)
591
+ rescale(layer.mlp.fc2.weight.data, layer_id + 1)
592
+
593
+ @torch.jit.ignore
594
+ def no_weight_decay(self):
595
+ if self.pos_embed_win is not None:
596
+ return ['pos_embed', 'pos_embed_win']
597
+ elif self.pos_embed is not None:
598
+ return ['pos_embed']
599
+ else:
600
+ return ["pos_embed_spatial", "pos_embed_temporal"]
601
+
602
+ @torch.jit.ignore
603
+ def group_matcher(self, coarse: bool = False) -> Dict:
604
+ return dict(
605
+ stem=r'^pos_embed|pos_embed_spatial|pos_embed_temporal|pos_embed_abs|pos_embed_win|patch_embed',
606
+ blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
607
+ )
608
+
609
+ @torch.jit.ignore
610
+ def set_grad_checkpointing(self, enable: bool = True) -> None:
611
+ self.grad_checkpointing = enable
612
+
613
+ @torch.jit.ignore
614
+ def get_classifier(self):
615
+ return self.head.fc
616
+
617
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = False):
618
+ self.num_classes = num_classes
619
+ self.head.reset(num_classes, global_pool, reset_other=reset_other)
620
+
621
+ def get_random_mask(self, x: torch.Tensor, mask_ratio: float) -> torch.Tensor:
622
+ """
623
+ Generates a random mask where a mask_ratio fraction of mask units is dropped.
624
+ 1 is *keep*, 0 is *remove*. Useful for MAE, FLIP, etc.
625
+ """
626
+ B = x.shape[0]
627
+ # Tokens selected for masking at mask unit level
628
+ num_windows = math.prod(self.mask_spatial_shape) # num_mask_units
629
+ len_keep = int(num_windows * (1 - mask_ratio))
630
+ noise = torch.rand(B, num_windows, device=x.device)
631
+
632
+ # Sort noise for each sample
633
+ ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove
634
+ ids_restore = torch.argsort(ids_shuffle, dim=1)
635
+
636
+ # Generate the binary mask: 1 is *keep*, 0 is *remove*
637
+ # Note this is opposite to original MAE
638
+ mask = torch.zeros([B, num_windows], device=x.device)
639
+ mask[:, :len_keep] = 1
640
+ # Unshuffle to get the binary mask
641
+ mask = torch.gather(mask, dim=1, index=ids_restore)
642
+
643
+ return mask.bool()
644
+
645
+ def _pos_embed(self, x) -> torch.Tensor:
646
+ if self.pos_embed_win is not None:
647
+ # absolute win position embedding, from
648
+ # Window Attention is Bugged: How not to Interpolate Position Embeddings (https://arxiv.org/abs/2311.05613)
649
+ pos_embed_win = self.pos_embed_win.tile(self.mask_spatial_shape)
650
+ pos_embed = F.interpolate(
651
+ self.pos_embed,
652
+ size=pos_embed_win.shape[-2:],
653
+ mode='bicubic',
654
+ antialias=True,
655
+ )
656
+ pos_embed = pos_embed + pos_embed_win
657
+ pos_embed = pos_embed.flatten(2).transpose(1, 2)
658
+ elif self.pos_embed is not None:
659
+ pos_embed = self.pos_embed
660
+ else:
661
+ pos_embed = (
662
+ self.pos_embed_spatial.repeat(1, self.tokens_spatial_shape[0], 1)
663
+ +
664
+ torch.repeat_interleave(
665
+ self.pos_embed_temporal,
666
+ self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2],
667
+ dim=1,
668
+ )
669
+ )
670
+ x = x + pos_embed
671
+ return x
672
+
673
+ def forward_intermediates(
674
+ self,
675
+ x: torch.Tensor,
676
+ mask: Optional[torch.Tensor] = None,
677
+ indices: Optional[Union[int, List[int]]] = None,
678
+ norm: bool = False,
679
+ stop_early: bool = True,
680
+ output_fmt: str = 'NCHW',
681
+ intermediates_only: bool = False,
682
+ coarse: bool = True,
683
+ ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
684
+ """ Forward features that returns intermediates.
685
+
686
+ Args:
687
+ x: Input image tensor
688
+ indices: Take last n blocks if int, all if None, select matching indices if sequence
689
+ norm: Apply norm layer to all intermediates
690
+ stop_early: Stop iterating over blocks when last desired intermediate hit
691
+ output_fmt: Shape of intermediate feature outputs
692
+ intermediates_only: Only return intermediate features
693
+ Returns:
694
+ List of intermediate features if intermediates_only, else a (final tokens, intermediates) tuple.
695
+ """
696
+ assert not norm, 'normalization of features not supported'
697
+ assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.'
698
+ if coarse:
699
+ take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
700
+ take_indices = [self.stage_ends[i] for i in take_indices]
701
+ max_index = self.stage_ends[max_index]
702
+ else:
703
+ take_indices, max_index = feature_take_indices(len(self.blocks), indices)
704
+
705
+ if mask is not None:
706
+ patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape) # B, C, *mask_spatial_shape
707
+ else:
708
+ patch_mask = None
709
+ x = self.patch_embed(x, mask=patch_mask)
710
+ x = self._pos_embed(x)
711
+ x = self.unroll(x)
712
+
713
+ # Discard masked tokens
714
+ if mask is not None:
715
+ x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1])
716
+
717
+ intermediates = []
718
+ if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
719
+ blocks = self.blocks
720
+ else:
721
+ blocks = self.blocks[:max_index + 1]
722
+ for i, blk in enumerate(blocks):
723
+ x = blk(x)
724
+ if i in take_indices:
725
+ x_int = self.reroll(x, i, mask=mask)
726
+ intermediates.append(x_int.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x_int)
727
+
728
+ if intermediates_only:
729
+ return intermediates
730
+
731
+ return x, intermediates
732
+
733
+ def prune_intermediate_layers(
734
+ self,
735
+ indices: Union[int, List[int]] = 1,
736
+ prune_norm: bool = False,
737
+ prune_head: bool = True,
738
+ coarse: bool = True,
739
+ ):
740
+ """ Prune layers not required for specified intermediates.
741
+ """
742
+ if coarse:
743
+ take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
744
+ max_index = self.stage_ends[max_index]
745
+ else:
746
+ take_indices, max_index = feature_take_indices(len(self.blocks), indices)
747
+ self.blocks = self.blocks[:max_index + 1] # truncate blocks
748
+ if prune_head:
749
+ self.head.reset(0, reset_other=True)
750
+ return take_indices
751
+
752
+ def forward_features(
753
+ self,
754
+ x: torch.Tensor,
755
+ mask: Optional[torch.Tensor] = None,
756
+ return_intermediates: bool = False,
757
+ ) -> torch.Tensor:
758
+ """
759
+ mask should be a boolean tensor of shape [B, #MUt*#MUy*#MUx] where #MU are the number of mask units in that dim.
760
+ Note: 1 in mask is *keep*, 0 is *remove*; mask.sum(dim=-1) should be the same across the batch.
761
+ """
762
+ if self.training and self.patch_drop_rate > 0:
763
+ # using mask for something like 'patch dropout' via mask-units in supervised train / fine-tune
764
+ assert mask is None
765
+ mask = self.get_random_mask(x, mask_ratio=self.patch_drop_rate)
766
+
767
+ if mask is not None:
768
+ patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape) # B, C, *mask_spatial_shape
769
+ else:
770
+ patch_mask = None
771
+ x = self.patch_embed(x, mask=patch_mask)
772
+ x = self._pos_embed(x)
773
+ x = self.unroll(x)
774
+
775
+ # Discard masked tokens
776
+ if mask is not None:
777
+ x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1])
778
+
779
+ intermediates = []
780
+ for i, blk in enumerate(self.blocks):
781
+ if self.grad_checkpointing and not torch.jit.is_scripting():
782
+ x = checkpoint(blk, x)
783
+ else:
784
+ x = blk(x)
785
+ if return_intermediates and i in self.stage_ends:
786
+ intermediates.append(self.reroll(x, i, mask=mask))
787
+
788
+ # x may not always be in spatial order here.
789
+ # e.g. if q_pool = 2, mask_unit_size = (8, 8), and
790
+ # q_stride = (2, 2), not all unrolls were consumed,
791
+ # intermediates[-1] is x in spatial order
792
+ if return_intermediates:
793
+ return x, intermediates
794
+
795
+ return x
796
+
797
+ def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
798
+ x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
799
+ return x
800
+
801
+ def forward(
802
+ self,
803
+ x: torch.Tensor,
804
+ mask: Optional[torch.Tensor] = None,
805
+ ) -> torch.Tensor:
806
+ x = self.forward_features(x, mask=mask)
807
+ if mask is None:
808
+ x = self.forward_head(x)
809
+ return x
810
+
811
+
812
+ def _cfg(url='', **kwargs):
813
+ return {
814
+ 'url': url,
815
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
816
+ 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
817
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
818
+ 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
819
+ **kwargs
820
+ }
821
+
822
+
823
+ default_cfgs = generate_default_cfgs({
824
+ "hiera_tiny_224.mae_in1k_ft_in1k": _cfg(
825
+ hf_hub_id='timm/',
826
+ license='cc-by-nc-4.0',
827
+ ),
828
+ "hiera_tiny_224.mae": _cfg(
829
+ hf_hub_id='timm/',
830
+ license='cc-by-nc-4.0',
831
+ num_classes=0,
832
+ ),
833
+
834
+ "hiera_small_224.mae_in1k_ft_in1k": _cfg(
835
+ hf_hub_id='timm/',
836
+ license='cc-by-nc-4.0',
837
+ ),
838
+ "hiera_small_224.mae": _cfg(
839
+ hf_hub_id='timm/',
840
+ license='cc-by-nc-4.0',
841
+ num_classes=0,
842
+ ),
843
+
844
+ "hiera_base_224.mae_in1k_ft_in1k": _cfg(
845
+ hf_hub_id='timm/',
846
+ license='cc-by-nc-4.0',
847
+ ),
848
+ "hiera_base_224.mae": _cfg(
849
+ hf_hub_id='timm/',
850
+ license='cc-by-nc-4.0',
851
+ num_classes=0,
852
+ ),
853
+
854
+ "hiera_base_plus_224.mae_in1k_ft_in1k": _cfg(
855
+ hf_hub_id='timm/',
856
+ license='cc-by-nc-4.0',
857
+ ),
858
+ "hiera_base_plus_224.mae": _cfg(
859
+ hf_hub_id='timm/',
860
+ license='cc-by-nc-4.0',
861
+ num_classes=0,
862
+ ),
863
+
864
+ "hiera_large_224.mae_in1k_ft_in1k": _cfg(
865
+ hf_hub_id='timm/',
866
+ license='cc-by-nc-4.0',
867
+ ),
868
+ "hiera_large_224.mae": _cfg(
869
+ hf_hub_id='timm/',
870
+ license='cc-by-nc-4.0',
871
+ num_classes=0,
872
+ ),
873
+
874
+ "hiera_huge_224.mae_in1k_ft_in1k": _cfg(
875
+ hf_hub_id='timm/',
876
+ license='cc-by-nc-4.0',
877
+ ),
878
+ "hiera_huge_224.mae": _cfg(
879
+ hf_hub_id='timm/',
880
+ license='cc-by-nc-4.0',
881
+ num_classes=0,
882
+ ),
883
+
884
+ "hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k": _cfg(
885
+ hf_hub_id='timm/',
886
+ input_size=(3, 256, 256), crop_pct=0.95,
887
+ ),
888
+ "hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k": _cfg(
889
+ hf_hub_id='timm/',
890
+ input_size=(3, 256, 256), crop_pct=0.95,
891
+ ),
892
+ "hiera_small_abswin_256.sbb2_e200_in12k": _cfg(
893
+ hf_hub_id='timm/',
894
+ num_classes=11821,
895
+ input_size=(3, 256, 256), crop_pct=0.95,
896
+ ),
897
+ "hiera_small_abswin_256.sbb2_pd_e200_in12k": _cfg(
898
+ hf_hub_id='timm/',
899
+ num_classes=11821,
900
+ input_size=(3, 256, 256), crop_pct=0.95,
901
+ ),
902
+ "hiera_base_abswin_256.untrained": _cfg(
903
+ # hf_hub_id='timm/',
904
+ input_size=(3, 256, 256), crop_pct=0.95,
905
+ ),
906
+ })
907
+
908
+
909
+ def checkpoint_filter_fn(state_dict, model=None):
910
+ state_dict = state_dict.get('model_state', state_dict)
911
+ output = {}
912
+ for k, v in state_dict.items():
913
+ # if k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
914
+ # # To resize pos embedding when using model at different size from pretrained weights
915
+ # from timm.layers import resample_abs_pos_embed
916
+ # v = resample_abs_pos_embed(
917
+ # v,
918
+ # new_size=(64, 64),
919
+ # num_prefix_tokens=0,
920
+ # verbose=True,
921
+ # )
922
+ if 'head.projection.' in k:
923
+ k = k.replace('head.projection.', 'head.fc.')
924
+ if k.startswith('encoder_norm.'):
925
+ k = k.replace('encoder_norm.', 'head.norm.')
926
+ elif k.startswith('norm.'):
927
+ k = k.replace('norm.', 'head.norm.')
928
+ if k == 'pos_embed_abs':
929
+ k = 'pos_embed'
930
+ output[k] = v
931
+ return output
932
+
933
+
934
+ def _create_hiera(variant: str, pretrained: bool = False, **kwargs) -> Hiera:
935
+ out_indices = kwargs.pop('out_indices', 4)
936
+
937
+ return build_model_with_cfg(
938
+ Hiera,
939
+ variant,
940
+ pretrained,
941
+ pretrained_filter_fn=checkpoint_filter_fn,
942
+ feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
943
+ **kwargs,
944
+ )
945
+
946
+
947
+ @register_model
948
+ def hiera_tiny_224(pretrained=False, **kwargs):
949
+ model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 7, 2))
950
+ return _create_hiera('hiera_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs))
951
+
952
+
953
+ @register_model
954
+ def hiera_small_224(pretrained=False, **kwargs):
955
+ model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 11, 2))
956
+ return _create_hiera('hiera_small_224', pretrained=pretrained, **dict(model_args, **kwargs))
957
+
958
+
959
+ @register_model
960
+ def hiera_base_224(pretrained=False, **kwargs):
961
+ model_args = dict(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3))
962
+ return _create_hiera('hiera_base_224', pretrained=pretrained, **dict(model_args, **kwargs))
963
+
964
+
965
+ @register_model
966
+ def hiera_base_plus_224(pretrained=False, **kwargs):
967
+ model_args = dict(embed_dim=112, num_heads=2, stages=(2, 3, 16, 3))
968
+ return _create_hiera('hiera_base_plus_224', pretrained=pretrained, **dict(model_args, **kwargs))
969
+
970
+
971
+ @register_model
972
+ def hiera_large_224(pretrained=False, **kwargs):
973
+ model_args = dict(embed_dim=144, num_heads=2, stages=(2, 6, 36, 4))
974
+ return _create_hiera('hiera_large_224', pretrained=pretrained, **dict(model_args, **kwargs))
975
+
976
+
977
+ @register_model
978
+ def hiera_huge_224(pretrained=False, **kwargs):
979
+ model_args = dict(embed_dim=256, num_heads=4, stages=(2, 6, 36, 4))
980
+ return _create_hiera('hiera_huge_224', pretrained=pretrained, **dict(model_args, **kwargs))
981
+
982
+
983
+ @register_model
984
+ def hiera_small_abswin_256(pretrained=False, **kwargs):
985
+ model_args = dict(
986
+ embed_dim=96, num_heads=1, stages=(1, 2, 11, 2), abs_win_pos_embed=True, global_pos_size=(16, 16),
987
+ init_values=1e-5, weight_init='jax', use_expand_proj=False,
988
+ )
989
+ return _create_hiera('hiera_small_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs))
990
+
991
+
992
+ @register_model
993
+ def hiera_base_abswin_256(pretrained=False, **kwargs):
994
+ model_args = dict(
995
+ embed_dim=96, num_heads=1, stages=(2, 3, 16, 3), abs_win_pos_embed=True, init_values=1e-5, weight_init='jax')
996
+ return _create_hiera('hiera_base_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs))
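A short usage sketch for the Hiera entrypoints registered above (assuming a timm install containing this hiera.py; no pretrained weights required), covering the plain classification forward, the masked forward used for MAE/FLIP-style training, and the coarse feature pyramid from `forward_intermediates`:

```python
# Usage sketch for the Hiera entrypoints registered above (weights not required).
import torch
import timm

model = timm.create_model('hiera_tiny_224', pretrained=False)
x = torch.randn(2, 3, 224, 224)

logits = model(x)                                  # [2, 1000] classification forward

# MAE/FLIP-style masked forward: keep 40% of mask units (1 = keep, 0 = drop)
mask = model.get_random_mask(x, mask_ratio=0.6)    # [2, num_mask_units], bool
tokens = model.forward_features(x, mask=mask)      # unpooled tokens for kept units

# Coarse (stage-end) intermediate feature maps in NCHW
pyramid = model.forward_intermediates(x, intermediates_only=True)
print([p.shape for p in pyramid])
```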
pytorch-image-models/timm/models/hieradet_sam2.py ADDED
@@ -0,0 +1,635 @@
1
+ import math
2
+ from copy import deepcopy
3
+ from functools import partial
4
+ from typing import Callable, Dict, List, Optional, Tuple, Union
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ from torch.jit import Final
10
+
11
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
12
+ from timm.layers import PatchEmbed, Mlp, DropPath, ClNormMlpClassifierHead, LayerScale, \
13
+ get_norm_layer, get_act_layer, init_weight_jax, init_weight_vit, to_2tuple, use_fused_attn
14
+
15
+ from ._builder import build_model_with_cfg
16
+ from ._features import feature_take_indices
17
+ from ._manipulate import named_apply, checkpoint_seq, adapt_input_conv
18
+ from ._registry import generate_default_cfgs, register_model, register_model_deprecations
19
+
20
+
21
+ def window_partition(x, window_size: Tuple[int, int]):
22
+ """
23
+ Partition into non-overlapping windows. The input is assumed to already be padded to a multiple of window_size (see _calc_pad and the caller).
24
+ Args:
25
+ x (tensor): input tokens with [B, H, W, C].
26
+ window_size (Tuple[int, int]): window size.
27
+ Returns:
28
+ windows: windows after partition with [B * num_windows, window_size[0], window_size[1], C].
30
+ """
31
+ B, H, W, C = x.shape
32
+ x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
33
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
34
+ return windows
35
+
36
+
37
+ def window_unpartition(windows: torch.Tensor, window_size: Tuple[int, int], hw: Tuple[int, int]):
38
+ """
39
+ Window unpartition into original sequences and removing padding.
40
+ Args:
41
+ x (tensor): input tokens with [B * num_windows, window_size, window_size, C].
42
+ window_size (int): window size.
43
+ hw (Tuple): original height and width (H, W) before padding.
44
+ Returns:
45
+ x: unpartitioned sequences with [B, H, W, C].
46
+ """
47
+ H, W = hw
48
+ B = windows.shape[0] // (H * W // window_size[0] // window_size[1])
49
+ x = windows.view(B, H // window_size[0], W // window_size[1], window_size[0], window_size[1], -1)
50
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
51
+ return x
52
+
53
+
54
+ def _calc_pad(H: int, W: int, window_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
55
+ pad_h = (window_size[0] - H % window_size[0]) % window_size[0]
56
+ pad_w = (window_size[1] - W % window_size[1]) % window_size[1]
57
+ Hp, Wp = H + pad_h, W + pad_w
58
+ return Hp, Wp, pad_h, pad_w
59
+
60
+
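A small round-trip sketch of how these window helpers compose (assuming they are importable from `timm.models.hieradet_sam2`): pad to a window multiple with `_calc_pad`, partition, unpartition back to the padded layout, then unpad as `MultiScaleBlock` does:

```python
# Round-trip sketch for the window helpers above (module path assumed).
import torch
import torch.nn.functional as F
from timm.models.hieradet_sam2 import window_partition, window_unpartition, _calc_pad

B, H, W, C = 2, 10, 13, 32
window_size = (8, 8)
x = torch.randn(B, H, W, C)

Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size)          # pad up to 16 x 16
x_pad = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))                 # pad W then H (NHWC)
windows = window_partition(x_pad, window_size)               # [B * 4, 8, 8, C]
x_rec = window_unpartition(windows, window_size, (Hp, Wp))   # [B, Hp, Wp, C]
x_rec = x_rec[:, :H, :W, :]                                  # unpad, as in MultiScaleBlock
assert torch.equal(x_rec, x)
```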
61
+ class MultiScaleAttention(nn.Module):
62
+ fused_attn: torch.jit.Final[bool]
63
+
64
+ def __init__(
65
+ self,
66
+ dim: int,
67
+ dim_out: int,
68
+ num_heads: int,
69
+ q_pool: nn.Module = None,
70
+ ):
71
+ super().__init__()
72
+ self.dim = dim
73
+ self.dim_out = dim_out
74
+ self.num_heads = num_heads
75
+ head_dim = dim_out // num_heads
76
+ self.scale = head_dim ** -0.5
77
+ self.fused_attn = use_fused_attn()
78
+
79
+ self.q_pool = q_pool
80
+ self.qkv = nn.Linear(dim, dim_out * 3)
81
+ self.proj = nn.Linear(dim_out, dim_out)
82
+
83
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
84
+ B, H, W, _ = x.shape
85
+
86
+ # qkv with shape (B, H * W, 3, nHead, C)
87
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
88
+
89
+ # q, k, v with shape (B, H * W, nheads, C)
90
+ q, k, v = torch.unbind(qkv, 2)
91
+
92
+ # Q pooling (for downsample at stage changes)
93
+ if self.q_pool is not None:
94
+ q = q.reshape(B, H, W, -1).permute(0, 3, 1, 2) # to BCHW for pool
95
+ q = self.q_pool(q).permute(0, 2, 3, 1)
96
+ H, W = q.shape[1:3] # downsampled shape
97
+ q = q.reshape(B, H * W, self.num_heads, -1)
98
+
99
+ # Torch's SDPA expects [B, nheads, H*W, C] so we transpose
100
+ q = q.transpose(1, 2)
101
+ k = k.transpose(1, 2)
102
+ v = v.transpose(1, 2)
103
+ if self.fused_attn:
104
+ x = F.scaled_dot_product_attention(q, k, v)
105
+ else:
106
+ q = q * self.scale
107
+ attn = q @ k.transpose(-1, -2)
108
+ attn = attn.softmax(dim=-1)
109
+ x = attn @ v
110
+
111
+ # Transpose back
112
+ x = x.transpose(1, 2).reshape(B, H, W, -1)
113
+
114
+ x = self.proj(x)
115
+ return x
116
+
117
+
118
+ class MultiScaleBlock(nn.Module):
119
+ def __init__(
120
+ self,
121
+ dim: int,
122
+ dim_out: int,
123
+ num_heads: int,
124
+ mlp_ratio: float = 4.0,
125
+ q_stride: Optional[Tuple[int, int]] = None,
126
+ norm_layer: Union[nn.Module, str] = "LayerNorm",
127
+ act_layer: Union[nn.Module, str] = "GELU",
128
+ window_size: int = 0,
129
+ init_values: Optional[float] = None,
130
+ drop_path: float = 0.0,
131
+ ):
132
+ super().__init__()
133
+ norm_layer = get_norm_layer(norm_layer)
134
+ act_layer = get_act_layer(act_layer)
135
+ self.window_size = to_2tuple(window_size)
136
+ self.is_windowed = any(self.window_size)
137
+ self.dim = dim
138
+ self.dim_out = dim_out
139
+ self.q_stride = q_stride
140
+
141
+ if dim != dim_out:
142
+ self.proj = nn.Linear(dim, dim_out)
143
+ else:
144
+ self.proj = nn.Identity()
145
+ self.pool = None
146
+ if self.q_stride:
147
+ # note: the attn module receives its own deepcopy of this pool below, so the instance is not shared
148
+ self.pool = nn.MaxPool2d(
149
+ kernel_size=q_stride,
150
+ stride=q_stride,
151
+ ceil_mode=False,
152
+ )
153
+
154
+ self.norm1 = norm_layer(dim)
155
+ self.attn = MultiScaleAttention(
156
+ dim,
157
+ dim_out,
158
+ num_heads=num_heads,
159
+ q_pool=deepcopy(self.pool),
160
+ )
161
+ self.ls1 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity()
162
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
163
+
164
+ self.norm2 = norm_layer(dim_out)
165
+ self.mlp = Mlp(
166
+ dim_out,
167
+ int(dim_out * mlp_ratio),
168
+ act_layer=act_layer,
169
+ )
170
+ self.ls2 = LayerScale(dim_out, init_values) if init_values is not None else nn.Identity()
171
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
172
+
173
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
174
+ shortcut = x # B, H, W, C
175
+ x = self.norm1(x)
176
+
177
+ # Skip connection
178
+ if self.dim != self.dim_out:
179
+ shortcut = self.proj(x)
180
+ if self.pool is not None:
181
+ shortcut = shortcut.permute(0, 3, 1, 2)
182
+ shortcut = self.pool(shortcut).permute(0, 2, 3, 1)
183
+
184
+ # Window partition
185
+ window_size = self.window_size
186
+ H, W = x.shape[1:3]
187
+ Hp, Wp = H, W # keep torchscript happy
188
+ if self.is_windowed:
189
+ Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size)
190
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
191
+ x = window_partition(x, window_size)
192
+
193
+ # Window Attention + Q Pooling (if stage change)
194
+ x = self.attn(x)
195
+ if self.q_stride is not None:
196
+ # Shapes have changed due to Q pooling
197
+ window_size = (self.window_size[0] // self.q_stride[0], self.window_size[1] // self.q_stride[1])
198
+ H, W = shortcut.shape[1:3]
199
+ Hp, Wp, pad_h, pad_w = _calc_pad(H, W, window_size)
200
+
201
+ # Reverse window partition
202
+ if self.is_windowed:
203
+ x = window_unpartition(x, window_size, (Hp, Wp))
204
+ x = x[:, :H, :W, :].contiguous() # unpad
205
+
206
+ x = shortcut + self.drop_path1(self.ls1(x))
207
+ x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
208
+ return x
209
+
210
+
211
+ class HieraPatchEmbed(nn.Module):
212
+ """
213
+ Image to Patch Embedding.
214
+ """
215
+
216
+ def __init__(
217
+ self,
218
+ kernel_size: Tuple[int, ...] = (7, 7),
219
+ stride: Tuple[int, ...] = (4, 4),
220
+ padding: Tuple[int, ...] = (3, 3),
221
+ in_chans: int = 3,
222
+ embed_dim: int = 768,
223
+ ):
224
+ """
225
+ Args:
226
+ kernel_size (Tuple): kernel size of the projection layer.
227
+ stride (Tuple): stride of the projection layer.
228
+ padding (Tuple): padding size of the projection layer.
229
+ in_chans (int): Number of input image channels.
230
+ embed_dim (int): Patch embedding dimension.
231
+ """
232
+ super().__init__()
233
+ self.proj = nn.Conv2d(
234
+ in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
235
+ )
236
+
237
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
238
+ x = self.proj(x)
239
+ # B C H W -> B H W C
240
+ x = x.permute(0, 2, 3, 1)
241
+ return x
242
+
243
+
244
+ class HieraDet(nn.Module):
245
+ """
246
+ Reference: https://arxiv.org/abs/2306.00989
247
+ """
248
+
249
+ def __init__(
250
+ self,
251
+ in_chans: int = 3,
252
+ num_classes: int = 1000,
253
+ global_pool: str = 'avg',
254
+ embed_dim: int = 96, # initial embed dim
255
+ num_heads: int = 1, # initial number of heads
256
+ patch_kernel: Tuple[int, ...] = (7, 7),
257
+ patch_stride: Tuple[int, ...] = (4, 4),
258
+ patch_padding: Tuple[int, ...] = (3, 3),
259
+ patch_size: Optional[Tuple[int, ...]] = None,
260
+ q_pool: int = 3, # number of q_pool stages
261
+ q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages
262
+ stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage
263
+ dim_mul: float = 2.0, # dim_mul factor at stage shift
264
+ head_mul: float = 2.0, # head_mul factor at stage shift
265
+ global_pos_size: Tuple[int, int] = (7, 7),
266
+ # window size per stage, when not using global att.
267
+ window_spec: Tuple[int, ...] = (
268
+ 8,
269
+ 4,
270
+ 14,
271
+ 7,
272
+ ),
273
+ # global attn in these blocks
274
+ global_att_blocks: Tuple[int, ...] = (
275
+ 12,
276
+ 16,
277
+ 20,
278
+ ),
279
+ init_values: Optional[float] = None,
280
+ weight_init: str = '',
281
+ fix_init: bool = True,
282
+ head_init_scale: float = 0.001,
283
+ drop_rate: float = 0.0,
284
+ drop_path_rate: float = 0.0, # stochastic depth
285
+ norm_layer: Union[nn.Module, str] = "LayerNorm",
286
+ act_layer: Union[nn.Module, str] = "GELU",
287
+ ):
288
+ super().__init__()
289
+ norm_layer = get_norm_layer(norm_layer)
290
+ act_layer = get_act_layer(act_layer)
291
+ assert len(stages) == len(window_spec)
292
+ self.num_classes = num_classes
293
+ self.window_spec = window_spec
294
+ self.output_fmt = 'NHWC'
295
+
296
+ depth = sum(stages)
297
+ self.q_stride = q_stride
298
+ self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
299
+ assert 0 <= q_pool <= len(self.stage_ends[:-1])
300
+ self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
301
+
302
+ if patch_size is not None:
303
+ # use a non-overlapping vit style patch embed
304
+ self.patch_embed = PatchEmbed(
305
+ img_size=None,
306
+ patch_size=patch_size,
307
+ in_chans=in_chans,
308
+ embed_dim=embed_dim,
309
+ output_fmt='NHWC',
310
+ dynamic_img_pad=True,
311
+ )
312
+ else:
313
+ self.patch_embed = HieraPatchEmbed(
314
+ kernel_size=patch_kernel,
315
+ stride=patch_stride,
316
+ padding=patch_padding,
317
+ in_chans=in_chans,
318
+ embed_dim=embed_dim,
319
+ )
320
+ # Which blocks have global att?
321
+ self.global_att_blocks = global_att_blocks
322
+
323
+ # Windowed positional embedding (https://arxiv.org/abs/2311.05613)
324
+ self.global_pos_size = global_pos_size
325
+ self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *self.global_pos_size))
326
+ self.pos_embed_window = nn.Parameter(torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0]))
327
+
328
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
329
+ cur_stage = 0
330
+ self.blocks = nn.Sequential()
331
+ self.feature_info = []
332
+ for i in range(depth):
333
+ dim_out = embed_dim
334
+ # window size lags by one block: the first block of a stage
335
+ # uses the previous stage's window size, while the remaining
336
+ # blocks use the current stage's window size
337
+ window_size = self.window_spec[cur_stage]
338
+
339
+ if self.global_att_blocks is not None:
340
+ window_size = 0 if i in self.global_att_blocks else window_size
341
+
342
+ if i - 1 in self.stage_ends:
343
+ dim_out = int(embed_dim * dim_mul)
344
+ num_heads = int(num_heads * head_mul)
345
+ cur_stage += 1
346
+
347
+ block = MultiScaleBlock(
348
+ dim=embed_dim,
349
+ dim_out=dim_out,
350
+ num_heads=num_heads,
351
+ drop_path=dpr[i],
352
+ q_stride=self.q_stride if i in self.q_pool_blocks else None,
353
+ window_size=window_size,
354
+ norm_layer=norm_layer,
355
+ act_layer=act_layer,
356
+ )
357
+
358
+ embed_dim = dim_out
359
+ self.blocks.append(block)
360
+ if i in self.stage_ends:
361
+ self.feature_info += [
362
+ dict(num_chs=dim_out, reduction=2**(cur_stage+2), module=f'blocks.{self.stage_ends[cur_stage]}')]
363
+
364
+ self.num_features = self.head_hidden_size = embed_dim
365
+ self.head = ClNormMlpClassifierHead(
366
+ embed_dim,
367
+ num_classes,
368
+ pool_type=global_pool,
369
+ drop_rate=drop_rate,
370
+ norm_layer=norm_layer,
371
+ )
372
+
373
+ # Initialize everything
374
+ if self.pos_embed is not None:
375
+ nn.init.trunc_normal_(self.pos_embed, std=0.02)
376
+
377
+ if self.pos_embed_window is not None:
378
+ nn.init.trunc_normal_(self.pos_embed_window, std=0.02)
379
+
380
+ if weight_init != 'skip':
381
+ init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit
382
+ init_fn = partial(init_fn, classifier_name='head.fc')
383
+ named_apply(init_fn, self)
384
+
385
+ if fix_init:
386
+ self.fix_init_weight()
387
+
388
+ if isinstance(self.head, ClNormMlpClassifierHead) and isinstance(self.head.fc, nn.Linear):
389
+ self.head.fc.weight.data.mul_(head_init_scale)
390
+ self.head.fc.bias.data.mul_(head_init_scale)
391
+
392
+ def _pos_embed(self, x: torch.Tensor) -> torch.Tensor:
393
+ h, w = x.shape[1:3]
394
+ window_embed = self.pos_embed_window
395
+ pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
396
+ tile_h = pos_embed.shape[-2] // window_embed.shape[-2]
397
+ tile_w = pos_embed.shape[-1] // window_embed.shape[-1]
398
+ pos_embed = pos_embed + window_embed.tile((tile_h, tile_w))
399
+ pos_embed = pos_embed.permute(0, 2, 3, 1)
400
+ return x + pos_embed
401
+
402
+ def fix_init_weight(self):
403
+ def rescale(param, _layer_id):
404
+ param.div_(math.sqrt(2.0 * _layer_id))
405
+
406
+ for layer_id, layer in enumerate(self.blocks):
407
+ rescale(layer.attn.proj.weight.data, layer_id + 1)
408
+ rescale(layer.mlp.fc2.weight.data, layer_id + 1)
409
+
410
+ @torch.jit.ignore
411
+ def no_weight_decay(self):
412
+ return ['pos_embed', 'pos_embed_window']
413
+
414
+ @torch.jit.ignore
415
+ def group_matcher(self, coarse: bool = False) -> Dict:
416
+ return dict(
417
+ stem=r'^pos_embed|pos_embed_window|patch_embed',
418
+ blocks=[(r'^blocks\.(\d+)', None)]
419
+ )
420
+
421
+ @torch.jit.ignore
422
+ def set_grad_checkpointing(self, enable: bool = True) -> None:
423
+ self.grad_checkpointing = enable
424
+
425
+ @torch.jit.ignore
426
+ def get_classifier(self):
427
+ return self.head.fc
428
+
429
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = False):
430
+ self.num_classes = num_classes
431
+ self.head.reset(num_classes, pool_type=global_pool, reset_other=reset_other)
432
+
433
+ def forward_intermediates(
434
+ self,
435
+ x: torch.Tensor,
436
+ indices: Optional[Union[int, List[int]]] = None,
437
+ norm: bool = False,
438
+ stop_early: bool = True,
439
+ output_fmt: str = 'NCHW',
440
+ intermediates_only: bool = False,
441
+ coarse: bool = True,
442
+ ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
443
+ """ Forward features that returns intermediates.
444
+
445
+ Args:
446
+ x: Input image tensor
447
+ indices: Take last n blocks if int, all if None, select matching indices if sequence
448
+ norm: Apply norm layer to all intermediates
449
+ stop_early: Stop iterating over blocks when last desired intermediate hit
450
+ output_fmt: Shape of intermediate feature outputs
451
+ intermediates_only: Only return intermediate features
452
+ coarse: Take coarse features (stage ends) if true, otherwise all block features
453
+ Returns:
454
+
455
+ List of intermediate features if intermediates_only, else a (final features, intermediates) tuple.
456
+ assert not norm, 'normalization of features not supported'
457
+ assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.'
458
+ if coarse:
459
+ take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
460
+ take_indices = [self.stage_ends[i] for i in take_indices]
461
+ max_index = self.stage_ends[max_index]
462
+ else:
463
+ take_indices, max_index = feature_take_indices(len(self.blocks), indices)
464
+
465
+ x = self.patch_embed(x)
466
+ x = self._pos_embed(x)
467
+
468
+ intermediates = []
469
+ if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
470
+ blocks = self.blocks
471
+ else:
472
+ blocks = self.blocks[:max_index + 1]
473
+ for i, blk in enumerate(blocks):
474
+ x = blk(x)
475
+ if i in take_indices:
476
+ x_out = x.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x
477
+ intermediates.append(x_out)
478
+
479
+ if intermediates_only:
480
+ return intermediates
481
+
482
+ return x, intermediates
483
+
484
+ def prune_intermediate_layers(
485
+ self,
486
+ indices: Union[int, List[int]] = 1,
487
+ prune_norm: bool = False,
488
+ prune_head: bool = True,
489
+ coarse: bool = True,
490
+ ):
491
+ """ Prune layers not required for specified intermediates.
492
+ """
493
+ if coarse:
494
+ take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
495
+ max_index = self.stage_ends[max_index]
496
+ else:
497
+ take_indices, max_index = feature_take_indices(len(self.blocks), indices)
498
+ self.blocks = self.blocks[:max_index + 1] # truncate blocks
499
+ if prune_head:
500
+ self.head.reset(0, reset_other=prune_norm)
501
+ return take_indices
502
+
503
+ def forward_features(self, x: torch.Tensor) -> torch.Tensor:
504
+ x = self.patch_embed(x) # BHWC
505
+ x = self._pos_embed(x)
506
+ for i, blk in enumerate(self.blocks):
507
+ x = blk(x)
508
+ return x
509
+
510
+ def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
511
+ x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
512
+ return x
513
+
514
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
515
+ x = self.forward_features(x)
516
+ x = self.forward_head(x)
517
+ return x
518
+
519
+
520
+ # NOTE sam2 appears to use 1024x1024 for all models, but T, S, & B+ have windows that fit multiples of 224.
521
+ def _cfg(url='', **kwargs):
522
+ return {
523
+ 'url': url,
524
+ 'num_classes': 0, 'input_size': (3, 896, 896), 'pool_size': (28, 28),
525
+ 'crop_pct': 1.0, 'interpolation': 'bicubic', 'min_input_size': (3, 224, 224),
526
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
527
+ 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc',
528
+ **kwargs
529
+ }
530
+
531
+
532
+ default_cfgs = generate_default_cfgs({
533
+ "sam2_hiera_tiny.r224": _cfg(
534
+ hf_hub_id='facebook/sam2-hiera-tiny',
535
+ hf_hub_filename='sam2_hiera_tiny.pt',
536
+ input_size=(3, 224, 224), pool_size=(7, 7),
537
+ ), # FIXME reduced res for testing
538
+ "sam2_hiera_tiny.r896": _cfg(
539
+ hf_hub_id='facebook/sam2-hiera-tiny',
540
+ hf_hub_filename='sam2_hiera_tiny.pt',
541
+ ),
542
+ "sam2_hiera_small": _cfg(
543
+ hf_hub_id='facebook/sam2-hiera-small',
544
+ hf_hub_filename='sam2_hiera_small.pt',
545
+ ),
546
+ "sam2_hiera_base_plus": _cfg(
547
+ hf_hub_id='facebook/sam2-hiera-base-plus',
548
+ hf_hub_filename='sam2_hiera_base_plus.pt',
549
+ ),
550
+ "sam2_hiera_large": _cfg(
551
+ hf_hub_id='facebook/sam2-hiera-large',
552
+ hf_hub_filename='sam2_hiera_large.pt',
553
+ min_input_size=(3, 256, 256),
554
+ input_size=(3, 1024, 1024), pool_size=(32, 32),
555
+ ),
556
+ "hieradet_small.untrained": _cfg(
557
+ num_classes=1000,
558
+ input_size=(3, 256, 256), pool_size=(8, 8),
559
+ ),
560
+ })
561
+
562
+
563
+ def checkpoint_filter_fn(state_dict, model=None, prefix=''):
564
+ state_dict = state_dict.get('model', state_dict)
565
+
566
+ output = {}
567
+ for k, v in state_dict.items():
568
+ if k.startswith(prefix):
569
+ k = k.replace(prefix, '')
570
+ else:
571
+ continue
572
+ k = k.replace('mlp.layers.0', 'mlp.fc1')
573
+ k = k.replace('mlp.layers.1', 'mlp.fc2')
574
+ output[k] = v
575
+ return output
576
+
577
+
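A quick sketch of what the filter above does to a SAM2 checkpoint, using invented key names purely for illustration (real values would be tensors):

    sd = {
        'image_encoder.trunk.blocks.0.mlp.layers.0.weight': 'w0',
        'image_encoder.trunk.blocks.0.mlp.layers.1.weight': 'w1',
        'image_encoder.neck.convs.0.weight': 'ignored',  # wrong prefix, so skipped
    }
    print(checkpoint_filter_fn(sd, prefix='image_encoder.trunk.'))
    # {'blocks.0.mlp.fc1.weight': 'w0', 'blocks.0.mlp.fc2.weight': 'w1'}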
578
+ def _create_hiera_det(variant: str, pretrained: bool = False, **kwargs) -> HieraDet:
579
+ out_indices = kwargs.pop('out_indices', 4)
580
+ checkpoint_prefix = ''
581
+ if 'sam2' in variant:
582
+ # SAM2 pretrained weights have no classifier or final norm-layer (`head.norm`)
583
+ # This is a workaround for loading with num_classes=0 w/o removing the norm layer.
584
+ kwargs.setdefault('pretrained_strict', False)
585
+ checkpoint_prefix = 'image_encoder.trunk.'
586
+ return build_model_with_cfg(
587
+ HieraDet,
588
+ variant,
589
+ pretrained,
590
+ pretrained_filter_fn=partial(checkpoint_filter_fn, prefix=checkpoint_prefix),
591
+ feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
592
+ **kwargs,
593
+ )
594
+
595
+
596
+ @register_model
597
+ def sam2_hiera_tiny(pretrained=False, **kwargs):
598
+ model_args = dict(stages=(1, 2, 7, 2), global_att_blocks=(5, 7, 9))
599
+ return _create_hiera_det('sam2_hiera_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
600
+
601
+
602
+ @register_model
603
+ def sam2_hiera_small(pretrained=False, **kwargs):
604
+ model_args = dict(stages=(1, 2, 11, 2), global_att_blocks=(7, 10, 13))
605
+ return _create_hiera_det('sam2_hiera_small', pretrained=pretrained, **dict(model_args, **kwargs))
606
+
607
+
608
+ @register_model
609
+ def sam2_hiera_base_plus(pretrained=False, **kwargs):
610
+ model_args = dict(embed_dim=112, num_heads=2, global_pos_size=(14, 14))
611
+ return _create_hiera_det('sam2_hiera_base_plus', pretrained=pretrained, **dict(model_args, **kwargs))
612
+
613
+
614
+ @register_model
615
+ def sam2_hiera_large(pretrained=False, **kwargs):
616
+ model_args = dict(
617
+ embed_dim=144,
618
+ num_heads=2,
619
+ stages=(2, 6, 36, 4),
620
+ global_att_blocks=(23, 33, 43),
621
+ window_spec=(8, 4, 16, 8),
622
+ )
623
+ return _create_hiera_det('sam2_hiera_large', pretrained=pretrained, **dict(model_args, **kwargs))
624
+
625
+
626
+ @register_model
627
+ def hieradet_small(pretrained=False, **kwargs):
628
+ model_args = dict(stages=(1, 2, 11, 2), global_att_blocks=(7, 10, 13), window_spec=(8, 4, 16, 8), init_values=1e-5)
629
+ return _create_hiera_det('hieradet_small', pretrained=pretrained, **dict(model_args, **kwargs))
630
+
631
+
632
+ # @register_model
633
+ # def hieradet_base(pretrained=False, **kwargs):
634
+ # model_args = dict(window_spec=(8, 4, 16, 8))
635
+ # return _create_hiera_det('hieradet_base', pretrained=pretrained, **dict(model_args, **kwargs))
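A minimal usage sketch for the registrations above, assuming this timm build is installed; 'hieradet_small' has no pretrained weights, so nothing is downloaded:

    import timm
    import torch

    model = timm.create_model('hieradet_small', pretrained=False).eval()
    x = torch.randn(1, 3, 256, 256)

    # Coarse intermediates: one NCHW feature map per stage end (strides 4/8/16/32).
    with torch.no_grad():
        feats = model.forward_intermediates(x, intermediates_only=True)
    for f in feats:
        print(tuple(f.shape))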
pytorch-image-models/timm/models/hub.py ADDED
@@ -0,0 +1,4 @@
1
+ from ._hub import *
2
+
3
+ import warnings
4
+ warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.models", FutureWarning)
pytorch-image-models/timm/models/inception_next.py ADDED
@@ -0,0 +1,445 @@
1
+ """
2
+ InceptionNeXt paper: https://arxiv.org/abs/2303.16900
3
+ Original implementation & weights from: https://github.com/sail-sg/inceptionnext
4
+ """
5
+
6
+ from functools import partial
7
+ from typing import Optional
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+
12
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
13
+ from timm.layers import trunc_normal_, DropPath, to_2tuple, get_padding, SelectAdaptivePool2d
14
+ from ._builder import build_model_with_cfg
15
+ from ._manipulate import checkpoint_seq
16
+ from ._registry import register_model, generate_default_cfgs
17
+
18
+ __all__ = ['MetaNeXt']
19
+
20
+
21
+ class InceptionDWConv2d(nn.Module):
22
+ """ Inception depthwise convolution
23
+ """
24
+
25
+ def __init__(
26
+ self,
27
+ in_chs,
28
+ square_kernel_size=3,
29
+ band_kernel_size=11,
30
+ branch_ratio=0.125,
31
+ dilation=1,
32
+ ):
33
+ super().__init__()
34
+
35
+ gc = int(in_chs * branch_ratio) # channel numbers of a convolution branch
36
+ square_padding = get_padding(square_kernel_size, dilation=dilation)
37
+ band_padding = get_padding(band_kernel_size, dilation=dilation)
38
+ self.dwconv_hw = nn.Conv2d(
39
+ gc, gc, square_kernel_size,
40
+ padding=square_padding, dilation=dilation, groups=gc)
41
+ self.dwconv_w = nn.Conv2d(
42
+ gc, gc, (1, band_kernel_size),
43
+ padding=(0, band_padding), dilation=(1, dilation), groups=gc)
44
+ self.dwconv_h = nn.Conv2d(
45
+ gc, gc, (band_kernel_size, 1),
46
+ padding=(band_padding, 0), dilation=(dilation, 1), groups=gc)
47
+ self.split_indexes = (in_chs - 3 * gc, gc, gc, gc)
48
+
49
+ def forward(self, x):
50
+ x_id, x_hw, x_w, x_h = torch.split(x, self.split_indexes, dim=1)
51
+ return torch.cat((
52
+ x_id,
53
+ self.dwconv_hw(x_hw),
54
+ self.dwconv_w(x_w),
55
+ self.dwconv_h(x_h)
56
+ ), dim=1,
57
+ )
58
+
59
+
60
+ class ConvMlp(nn.Module):
61
+ """ MLP using 1x1 convs that keeps spatial dims
62
+ copied from timm: https://github.com/huggingface/pytorch-image-models/blob/v0.6.11/timm/models/layers/mlp.py
63
+ """
64
+
65
+ def __init__(
66
+ self,
67
+ in_features,
68
+ hidden_features=None,
69
+ out_features=None,
70
+ act_layer=nn.ReLU,
71
+ norm_layer=None,
72
+ bias=True,
73
+ drop=0.,
74
+ ):
75
+ super().__init__()
76
+ out_features = out_features or in_features
77
+ hidden_features = hidden_features or in_features
78
+ bias = to_2tuple(bias)
79
+
80
+ self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])
81
+ self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
82
+ self.act = act_layer()
83
+ self.drop = nn.Dropout(drop)
84
+ self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])
85
+
86
+ def forward(self, x):
87
+ x = self.fc1(x)
88
+ x = self.norm(x)
89
+ x = self.act(x)
90
+ x = self.drop(x)
91
+ x = self.fc2(x)
92
+ return x
93
+
94
+
95
+ class MlpClassifierHead(nn.Module):
96
+ """ MLP classification head
97
+ """
98
+
99
+ def __init__(
100
+ self,
101
+ in_features,
102
+ num_classes=1000,
103
+ pool_type='avg',
104
+ mlp_ratio=3,
105
+ act_layer=nn.GELU,
106
+ norm_layer=partial(nn.LayerNorm, eps=1e-6),
107
+ drop=0.,
108
+ bias=True
109
+ ):
110
+ super().__init__()
111
+ self.use_conv = False
112
+ self.in_features = in_features
113
+ self.num_features = hidden_features = int(mlp_ratio * in_features)
114
+
115
+ assert pool_type, 'Cannot disable pooling'
116
+ self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
117
+
118
+ self.fc1 = nn.Linear(in_features * self.global_pool.feat_mult(), hidden_features, bias=bias)
119
+ self.act = act_layer()
120
+ self.norm = norm_layer(hidden_features)
121
+ self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
122
+ self.drop = nn.Dropout(drop)
123
+
124
+ def reset(self, num_classes: int, pool_type: Optional[str] = None):
125
+ if pool_type is not None:
126
+ assert pool_type, 'Cannot disable pooling'
127
+ self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
128
+
129
+ self.fc2 = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
130
+
131
+ def forward(self, x, pre_logits: bool = False):
132
+ x = self.global_pool(x)
133
+ x = self.fc1(x)
134
+ x = self.act(x)
135
+ x = self.norm(x)
136
+ x = self.drop(x)
137
+ return x if pre_logits else self.fc2(x)
138
+
139
+
140
+ class MetaNeXtBlock(nn.Module):
141
+ """ MetaNeXtBlock Block
142
+ Args:
143
+ dim (int): Number of input channels.
144
+ drop_path (float): Stochastic depth rate. Default: 0.0
145
+ ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
146
+ """
147
+
148
+ def __init__(
149
+ self,
150
+ dim,
151
+ dilation=1,
152
+ token_mixer=InceptionDWConv2d,
153
+ norm_layer=nn.BatchNorm2d,
154
+ mlp_layer=ConvMlp,
155
+ mlp_ratio=4,
156
+ act_layer=nn.GELU,
157
+ ls_init_value=1e-6,
158
+ drop_path=0.,
159
+
160
+ ):
161
+ super().__init__()
162
+ self.token_mixer = token_mixer(dim, dilation=dilation)
163
+ self.norm = norm_layer(dim)
164
+ self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=act_layer)
165
+ self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value else None
166
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
167
+
168
+ def forward(self, x):
169
+ shortcut = x
170
+ x = self.token_mixer(x)
171
+ x = self.norm(x)
172
+ x = self.mlp(x)
173
+ if self.gamma is not None:
174
+ x = x.mul(self.gamma.reshape(1, -1, 1, 1))
175
+ x = self.drop_path(x) + shortcut
176
+ return x
177
+
178
+
179
+ class MetaNeXtStage(nn.Module):
180
+ def __init__(
181
+ self,
182
+ in_chs,
183
+ out_chs,
184
+ stride=2,
185
+ depth=2,
186
+ dilation=(1, 1),
187
+ drop_path_rates=None,
188
+ ls_init_value=1.0,
189
+ token_mixer=InceptionDWConv2d,
190
+ act_layer=nn.GELU,
191
+ norm_layer=None,
192
+ mlp_ratio=4,
193
+ ):
194
+ super().__init__()
195
+ self.grad_checkpointing = False
196
+ if stride > 1 or dilation[0] != dilation[1]:
197
+ self.downsample = nn.Sequential(
198
+ norm_layer(in_chs),
199
+ nn.Conv2d(
200
+ in_chs,
201
+ out_chs,
202
+ kernel_size=2,
203
+ stride=stride,
204
+ dilation=dilation[0],
205
+ ),
206
+ )
207
+ else:
208
+ self.downsample = nn.Identity()
209
+
210
+ drop_path_rates = drop_path_rates or [0.] * depth
211
+ stage_blocks = []
212
+ for i in range(depth):
213
+ stage_blocks.append(MetaNeXtBlock(
214
+ dim=out_chs,
215
+ dilation=dilation[1],
216
+ drop_path=drop_path_rates[i],
217
+ ls_init_value=ls_init_value,
218
+ token_mixer=token_mixer,
219
+ act_layer=act_layer,
220
+ norm_layer=norm_layer,
221
+ mlp_ratio=mlp_ratio,
222
+ ))
223
+ self.blocks = nn.Sequential(*stage_blocks)
224
+
225
+ def forward(self, x):
226
+ x = self.downsample(x)
227
+ if self.grad_checkpointing and not torch.jit.is_scripting():
228
+ x = checkpoint_seq(self.blocks, x)
229
+ else:
230
+ x = self.blocks(x)
231
+ return x
232
+
233
+
234
+ class MetaNeXt(nn.Module):
235
+ r""" MetaNeXt
236
+ A PyTorch impl of : `InceptionNeXt: When Inception Meets ConvNeXt` - https://arxiv.org/abs/2303.16900
237
+
238
+ Args:
239
+ in_chans (int): Number of input image channels. Default: 3
240
+ num_classes (int): Number of classes for classification head. Default: 1000
241
+ depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 9, 3)
242
+ dims (tuple(int)): Feature dimension at each stage. Default: (96, 192, 384, 768)
243
+ token_mixers: Token mixer function. Default: InceptionDWConv2d
244
+ norm_layer: Normalization layer. Default: nn.BatchNorm2d
245
+ act_layer: Activation function for MLP. Default: nn.GELU
246
+ mlp_ratios (int or tuple(int)): MLP ratios. Default: (4, 4, 4, 3)
247
+ drop_rate (float): Head dropout rate
248
+ drop_path_rate (float): Stochastic depth rate. Default: 0.
249
+ ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
250
+ """
251
+
252
+ def __init__(
253
+ self,
254
+ in_chans=3,
255
+ num_classes=1000,
256
+ global_pool='avg',
257
+ output_stride=32,
258
+ depths=(3, 3, 9, 3),
259
+ dims=(96, 192, 384, 768),
260
+ token_mixers=InceptionDWConv2d,
261
+ norm_layer=nn.BatchNorm2d,
262
+ act_layer=nn.GELU,
263
+ mlp_ratios=(4, 4, 4, 3),
264
+ drop_rate=0.,
265
+ drop_path_rate=0.,
266
+ ls_init_value=1e-6,
267
+ ):
268
+ super().__init__()
269
+
270
+ num_stage = len(depths)
271
+ if not isinstance(token_mixers, (list, tuple)):
272
+ token_mixers = [token_mixers] * num_stage
273
+ if not isinstance(mlp_ratios, (list, tuple)):
274
+ mlp_ratios = [mlp_ratios] * num_stage
275
+ self.num_classes = num_classes
276
+ self.global_pool = global_pool
277
+ self.drop_rate = drop_rate
278
+ self.feature_info = []
279
+
280
+ self.stem = nn.Sequential(
281
+ nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
282
+ norm_layer(dims[0])
283
+ )
284
+
285
+ dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
286
+ prev_chs = dims[0]
287
+ curr_stride = 4
288
+ dilation = 1
289
+ # feature resolution stages, each consisting of multiple residual blocks
290
+ self.stages = nn.Sequential()
291
+ for i in range(num_stage):
292
+ stride = 2 if curr_stride == 2 or i > 0 else 1
293
+ if curr_stride >= output_stride and stride > 1:
294
+ dilation *= stride
295
+ stride = 1
296
+ curr_stride *= stride
297
+ first_dilation = 1 if dilation in (1, 2) else 2
298
+ out_chs = dims[i]
299
+ self.stages.append(MetaNeXtStage(
300
+ prev_chs,
301
+ out_chs,
302
+ stride=stride if i > 0 else 1,
303
+ dilation=(first_dilation, dilation),
304
+ depth=depths[i],
305
+ drop_path_rates=dp_rates[i],
306
+ ls_init_value=ls_init_value,
307
+ act_layer=act_layer,
308
+ token_mixer=token_mixers[i],
309
+ norm_layer=norm_layer,
310
+ mlp_ratio=mlp_ratios[i],
311
+ ))
312
+ prev_chs = out_chs
313
+ self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
314
+ self.num_features = prev_chs
315
+ self.head = MlpClassifierHead(self.num_features, num_classes, pool_type=self.global_pool, drop=drop_rate)
316
+ self.head_hidden_size = self.head.num_features
317
+ self.apply(self._init_weights)
318
+
319
+ def _init_weights(self, m):
320
+ if isinstance(m, (nn.Conv2d, nn.Linear)):
321
+ trunc_normal_(m.weight, std=.02)
322
+ if m.bias is not None:
323
+ nn.init.constant_(m.bias, 0)
324
+
325
+ @torch.jit.ignore
326
+ def group_matcher(self, coarse=False):
327
+ return dict(
328
+ stem=r'^stem',
329
+ blocks=r'^stages\.(\d+)' if coarse else [
330
+ (r'^stages\.(\d+)\.downsample', (0,)), # blocks
331
+ (r'^stages\.(\d+)\.blocks\.(\d+)', None),
332
+ ]
333
+ )
334
+
335
+ @torch.jit.ignore
336
+ def get_classifier(self) -> nn.Module:
337
+ return self.head.fc2
338
+
339
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
340
+ self.num_classes = num_classes
341
+ self.head.reset(num_classes, global_pool)
342
+
343
+ @torch.jit.ignore
344
+ def set_grad_checkpointing(self, enable=True):
345
+ for s in self.stages:
346
+ s.grad_checkpointing = enable
347
+
348
+ @torch.jit.ignore
349
+ def no_weight_decay(self):
350
+ return set()
351
+
352
+ def forward_features(self, x):
353
+ x = self.stem(x)
354
+ x = self.stages(x)
355
+ return x
356
+
357
+ def forward_head(self, x, pre_logits: bool = False):
358
+ return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
359
+
360
+ def forward(self, x):
361
+ x = self.forward_features(x)
362
+ x = self.forward_head(x)
363
+ return x
364
+
365
+
366
+ def _cfg(url='', **kwargs):
367
+ return {
368
+ 'url': url,
369
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
370
+ 'crop_pct': 0.875, 'interpolation': 'bicubic',
371
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
372
+ 'first_conv': 'stem.0', 'classifier': 'head.fc2',
373
+ **kwargs
374
+ }
375
+
376
+
377
+ default_cfgs = generate_default_cfgs({
378
+ 'inception_next_atto.sail_in1k': _cfg(
379
+ hf_hub_id='timm/',
380
+ # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_atto.pth',
381
+ ),
382
+ 'inception_next_tiny.sail_in1k': _cfg(
383
+ hf_hub_id='timm/',
384
+ # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_tiny.pth',
385
+ ),
386
+ 'inception_next_small.sail_in1k': _cfg(
387
+ hf_hub_id='timm/',
388
+ # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_small.pth',
389
+ ),
390
+ 'inception_next_base.sail_in1k': _cfg(
391
+ hf_hub_id='timm/',
392
+ # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base.pth',
393
+ crop_pct=0.95,
394
+ ),
395
+ 'inception_next_base.sail_in1k_384': _cfg(
396
+ hf_hub_id='timm/',
397
+ # url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base_384.pth',
398
+ input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
399
+ ),
400
+ })
401
+
402
+
403
+ def _create_inception_next(variant, pretrained=False, **kwargs):
404
+ model = build_model_with_cfg(
405
+ MetaNeXt, variant, pretrained,
406
+ feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
407
+ **kwargs,
408
+ )
409
+ return model
410
+
411
+
412
+ @register_model
413
+ def inception_next_atto(pretrained=False, **kwargs):
414
+ model_args = dict(
415
+ depths=(2, 2, 6, 2), dims=(40, 80, 160, 320),
416
+ token_mixers=partial(InceptionDWConv2d, band_kernel_size=9, branch_ratio=0.25)
417
+ )
418
+ return _create_inception_next('inception_next_atto', pretrained=pretrained, **dict(model_args, **kwargs))
419
+
420
+
421
+ @register_model
422
+ def inception_next_tiny(pretrained=False, **kwargs):
423
+ model_args = dict(
424
+ depths=(3, 3, 9, 3), dims=(96, 192, 384, 768),
425
+ token_mixers=InceptionDWConv2d,
426
+ )
427
+ return _create_inception_next('inception_next_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
428
+
429
+
430
+ @register_model
431
+ def inception_next_small(pretrained=False, **kwargs):
432
+ model_args = dict(
433
+ depths=(3, 3, 27, 3), dims=(96, 192, 384, 768),
434
+ token_mixers=InceptionDWConv2d,
435
+ )
436
+ return _create_inception_next('inception_next_small', pretrained=pretrained, **dict(model_args, **kwargs))
437
+
438
+
439
+ @register_model
440
+ def inception_next_base(pretrained=False, **kwargs):
441
+ model_args = dict(
442
+ depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024),
443
+ token_mixers=InceptionDWConv2d,
444
+ )
445
+ return _create_inception_next('inception_next_base', pretrained=pretrained, **dict(model_args, **kwargs))
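A minimal sketch of feature extraction with the models above, assuming this timm build is installed; the stage channels and strides come straight from feature_info:

    import timm
    import torch

    model = timm.create_model('inception_next_tiny', pretrained=False, features_only=True)
    print(model.feature_info.channels())   # [96, 192, 384, 768]
    print(model.feature_info.reduction())  # [4, 8, 16, 32]

    x = torch.randn(1, 3, 224, 224)
    for f in model(x):
        print(tuple(f.shape))  # (1, 96, 56, 56) ... (1, 768, 7, 7)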
pytorch-image-models/timm/models/inception_resnet_v2.py ADDED
@@ -0,0 +1,341 @@
1
+ """ Pytorch Inception-Resnet-V2 implementation
2
+ Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
3
+ based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
4
+ """
5
+ from functools import partial
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+
10
+ from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
11
+ from timm.layers import create_classifier, ConvNormAct
12
+ from ._builder import build_model_with_cfg
13
+ from ._manipulate import flatten_modules
14
+ from ._registry import register_model, generate_default_cfgs, register_model_deprecations
15
+
16
+ __all__ = ['InceptionResnetV2']
17
+
18
+
19
+ class Mixed_5b(nn.Module):
20
+ def __init__(self, conv_block=None):
21
+ super(Mixed_5b, self).__init__()
22
+ conv_block = conv_block or ConvNormAct
23
+
24
+ self.branch0 = conv_block(192, 96, kernel_size=1, stride=1)
25
+
26
+ self.branch1 = nn.Sequential(
27
+ conv_block(192, 48, kernel_size=1, stride=1),
28
+ conv_block(48, 64, kernel_size=5, stride=1, padding=2)
29
+ )
30
+
31
+ self.branch2 = nn.Sequential(
32
+ conv_block(192, 64, kernel_size=1, stride=1),
33
+ conv_block(64, 96, kernel_size=3, stride=1, padding=1),
34
+ conv_block(96, 96, kernel_size=3, stride=1, padding=1)
35
+ )
36
+
37
+ self.branch3 = nn.Sequential(
38
+ nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
39
+ conv_block(192, 64, kernel_size=1, stride=1)
40
+ )
41
+
42
+ def forward(self, x):
43
+ x0 = self.branch0(x)
44
+ x1 = self.branch1(x)
45
+ x2 = self.branch2(x)
46
+ x3 = self.branch3(x)
47
+ out = torch.cat((x0, x1, x2, x3), 1)
48
+ return out
49
+
50
+
51
+ class Block35(nn.Module):
52
+ def __init__(self, scale=1.0, conv_block=None):
53
+ super(Block35, self).__init__()
54
+ self.scale = scale
55
+ conv_block = conv_block or ConvNormAct
56
+
57
+ self.branch0 = conv_block(320, 32, kernel_size=1, stride=1)
58
+
59
+ self.branch1 = nn.Sequential(
60
+ conv_block(320, 32, kernel_size=1, stride=1),
61
+ conv_block(32, 32, kernel_size=3, stride=1, padding=1)
62
+ )
63
+
64
+ self.branch2 = nn.Sequential(
65
+ conv_block(320, 32, kernel_size=1, stride=1),
66
+ conv_block(32, 48, kernel_size=3, stride=1, padding=1),
67
+ conv_block(48, 64, kernel_size=3, stride=1, padding=1)
68
+ )
69
+
70
+ self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)
71
+ self.act = nn.ReLU()
72
+
73
+ def forward(self, x):
74
+ x0 = self.branch0(x)
75
+ x1 = self.branch1(x)
76
+ x2 = self.branch2(x)
77
+ out = torch.cat((x0, x1, x2), 1)
78
+ out = self.conv2d(out)
79
+ out = out * self.scale + x
80
+ out = self.act(out)
81
+ return out
82
+
83
+
84
+ class Mixed_6a(nn.Module):
85
+ def __init__(self, conv_block=None):
86
+ super(Mixed_6a, self).__init__()
87
+ conv_block = conv_block or ConvNormAct
88
+
89
+ self.branch0 = conv_block(320, 384, kernel_size=3, stride=2)
90
+
91
+ self.branch1 = nn.Sequential(
92
+ conv_block(320, 256, kernel_size=1, stride=1),
93
+ conv_block(256, 256, kernel_size=3, stride=1, padding=1),
94
+ conv_block(256, 384, kernel_size=3, stride=2)
95
+ )
96
+
97
+ self.branch2 = nn.MaxPool2d(3, stride=2)
98
+
99
+ def forward(self, x):
100
+ x0 = self.branch0(x)
101
+ x1 = self.branch1(x)
102
+ x2 = self.branch2(x)
103
+ out = torch.cat((x0, x1, x2), 1)
104
+ return out
105
+
106
+
107
+ class Block17(nn.Module):
108
+ def __init__(self, scale=1.0, conv_block=None):
109
+ super(Block17, self).__init__()
110
+ self.scale = scale
111
+ conv_block = conv_block or ConvNormAct
112
+
113
+ self.branch0 = conv_block(1088, 192, kernel_size=1, stride=1)
114
+
115
+ self.branch1 = nn.Sequential(
116
+ conv_block(1088, 128, kernel_size=1, stride=1),
117
+ conv_block(128, 160, kernel_size=(1, 7), stride=1, padding=(0, 3)),
118
+ conv_block(160, 192, kernel_size=(7, 1), stride=1, padding=(3, 0))
119
+ )
120
+
121
+ self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)
122
+ self.act = nn.ReLU()
123
+
124
+ def forward(self, x):
125
+ x0 = self.branch0(x)
126
+ x1 = self.branch1(x)
127
+ out = torch.cat((x0, x1), 1)
128
+ out = self.conv2d(out)
129
+ out = out * self.scale + x
130
+ out = self.act(out)
131
+ return out
132
+
133
+
134
+ class Mixed_7a(nn.Module):
135
+ def __init__(self, conv_block=None):
136
+ super(Mixed_7a, self).__init__()
137
+ conv_block = conv_block or ConvNormAct
138
+
139
+ self.branch0 = nn.Sequential(
140
+ conv_block(1088, 256, kernel_size=1, stride=1),
141
+ conv_block(256, 384, kernel_size=3, stride=2)
142
+ )
143
+
144
+ self.branch1 = nn.Sequential(
145
+ conv_block(1088, 256, kernel_size=1, stride=1),
146
+ conv_block(256, 288, kernel_size=3, stride=2)
147
+ )
148
+
149
+ self.branch2 = nn.Sequential(
150
+ conv_block(1088, 256, kernel_size=1, stride=1),
151
+ conv_block(256, 288, kernel_size=3, stride=1, padding=1),
152
+ conv_block(288, 320, kernel_size=3, stride=2)
153
+ )
154
+
155
+ self.branch3 = nn.MaxPool2d(3, stride=2)
156
+
157
+ def forward(self, x):
158
+ x0 = self.branch0(x)
159
+ x1 = self.branch1(x)
160
+ x2 = self.branch2(x)
161
+ x3 = self.branch3(x)
162
+ out = torch.cat((x0, x1, x2, x3), 1)
163
+ return out
164
+
165
+
166
+ class Block8(nn.Module):
167
+
168
+ def __init__(self, scale=1.0, no_relu=False, conv_block=None):
169
+ super(Block8, self).__init__()
170
+ self.scale = scale
171
+ conv_block = conv_block or ConvNormAct
172
+
173
+ self.branch0 = conv_block(2080, 192, kernel_size=1, stride=1)
174
+
175
+ self.branch1 = nn.Sequential(
176
+ conv_block(2080, 192, kernel_size=1, stride=1),
177
+ conv_block(192, 224, kernel_size=(1, 3), stride=1, padding=(0, 1)),
178
+ conv_block(224, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
179
+ )
180
+
181
+ self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)
182
+ self.relu = None if no_relu else nn.ReLU()
183
+
184
+ def forward(self, x):
185
+ x0 = self.branch0(x)
186
+ x1 = self.branch1(x)
187
+ out = torch.cat((x0, x1), 1)
188
+ out = self.conv2d(out)
189
+ out = out * self.scale + x
190
+ if self.relu is not None:
191
+ out = self.relu(out)
192
+ return out
193
+
194
+
195
+ class InceptionResnetV2(nn.Module):
196
+ def __init__(
197
+ self,
198
+ num_classes=1000,
199
+ in_chans=3,
200
+ drop_rate=0.,
201
+ output_stride=32,
202
+ global_pool='avg',
203
+ norm_layer='batchnorm2d',
204
+ norm_eps=1e-3,
205
+ act_layer='relu',
206
+ ):
207
+ super(InceptionResnetV2, self).__init__()
208
+ self.num_classes = num_classes
209
+ self.num_features = self.head_hidden_size = 1536
210
+ assert output_stride == 32
211
+ conv_block = partial(
212
+ ConvNormAct,
213
+ padding=0,
214
+ norm_layer=norm_layer,
215
+ act_layer=act_layer,
216
+ norm_kwargs=dict(eps=norm_eps),
217
+ act_kwargs=dict(inplace=True),
218
+ )
219
+
220
+ self.conv2d_1a = conv_block(in_chans, 32, kernel_size=3, stride=2)
221
+ self.conv2d_2a = conv_block(32, 32, kernel_size=3, stride=1)
222
+ self.conv2d_2b = conv_block(32, 64, kernel_size=3, stride=1, padding=1)
223
+ self.feature_info = [dict(num_chs=64, reduction=2, module='conv2d_2b')]
224
+
225
+ self.maxpool_3a = nn.MaxPool2d(3, stride=2)
226
+ self.conv2d_3b = conv_block(64, 80, kernel_size=1, stride=1)
227
+ self.conv2d_4a = conv_block(80, 192, kernel_size=3, stride=1)
228
+ self.feature_info += [dict(num_chs=192, reduction=4, module='conv2d_4a')]
229
+
230
+ self.maxpool_5a = nn.MaxPool2d(3, stride=2)
231
+ self.mixed_5b = Mixed_5b(conv_block=conv_block)
232
+ self.repeat = nn.Sequential(*[Block35(scale=0.17, conv_block=conv_block) for _ in range(10)])
233
+ self.feature_info += [dict(num_chs=320, reduction=8, module='repeat')]
234
+
235
+ self.mixed_6a = Mixed_6a(conv_block=conv_block)
236
+ self.repeat_1 = nn.Sequential(*[Block17(scale=0.10, conv_block=conv_block) for _ in range(20)])
237
+ self.feature_info += [dict(num_chs=1088, reduction=16, module='repeat_1')]
238
+
239
+ self.mixed_7a = Mixed_7a(conv_block=conv_block)
240
+ self.repeat_2 = nn.Sequential(*[Block8(scale=0.20, conv_block=conv_block) for _ in range(9)])
241
+
242
+ self.block8 = Block8(no_relu=True, conv_block=conv_block)
243
+ self.conv2d_7b = conv_block(2080, self.num_features, kernel_size=1, stride=1)
244
+ self.feature_info += [dict(num_chs=self.num_features, reduction=32, module='conv2d_7b')]
245
+
246
+ self.global_pool, self.head_drop, self.classif = create_classifier(
247
+ self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
248
+
249
+ @torch.jit.ignore
250
+ def group_matcher(self, coarse=False):
251
+ module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))}
252
+ module_map.pop(('classif',))
253
+
254
+ def _matcher(name):
255
+ if any([name.startswith(n) for n in ('conv2d_1', 'conv2d_2')]):
256
+ return 0
257
+ elif any([name.startswith(n) for n in ('conv2d_3', 'conv2d_4')]):
258
+ return 1
259
+ elif any([name.startswith(n) for n in ('block8', 'conv2d_7')]):
260
+ return len(module_map) + 1
261
+ else:
262
+ for k in module_map.keys():
263
+ if k == tuple(name.split('.')[:len(k)]):
264
+ return module_map[k]
265
+ return float('inf')
266
+ return _matcher
267
+
268
+ @torch.jit.ignore
269
+ def set_grad_checkpointing(self, enable=True):
270
+ assert not enable, "checkpointing not supported"
271
+
272
+ @torch.jit.ignore
273
+ def get_classifier(self) -> nn.Module:
274
+ return self.classif
275
+
276
+ def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
277
+ self.num_classes = num_classes
278
+ self.global_pool, self.classif = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
279
+
280
+ def forward_features(self, x):
281
+ x = self.conv2d_1a(x)
282
+ x = self.conv2d_2a(x)
283
+ x = self.conv2d_2b(x)
284
+ x = self.maxpool_3a(x)
285
+ x = self.conv2d_3b(x)
286
+ x = self.conv2d_4a(x)
287
+ x = self.maxpool_5a(x)
288
+ x = self.mixed_5b(x)
289
+ x = self.repeat(x)
290
+ x = self.mixed_6a(x)
291
+ x = self.repeat_1(x)
292
+ x = self.mixed_7a(x)
293
+ x = self.repeat_2(x)
294
+ x = self.block8(x)
295
+ x = self.conv2d_7b(x)
296
+ return x
297
+
298
+ def forward_head(self, x, pre_logits: bool = False):
299
+ x = self.global_pool(x)
300
+ x = self.head_drop(x)
301
+ return x if pre_logits else self.classif(x)
302
+
303
+ def forward(self, x):
304
+ x = self.forward_features(x)
305
+ x = self.forward_head(x)
306
+ return x
307
+
308
+
309
+ def _create_inception_resnet_v2(variant, pretrained=False, **kwargs):
310
+ return build_model_with_cfg(InceptionResnetV2, variant, pretrained, **kwargs)
311
+
312
+
313
+ default_cfgs = generate_default_cfgs({
314
+ # ported from http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz
315
+ 'inception_resnet_v2.tf_in1k': {
316
+ 'hf_hub_id': 'timm/',
317
+ 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
318
+ 'crop_pct': 0.8975, 'interpolation': 'bicubic',
319
+ 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
320
+ 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif',
321
+ },
322
+ # As per https://arxiv.org/abs/1705.07204 and
323
+ # ported from http://download.tensorflow.org/models/ens_adv_inception_resnet_v2_2017_08_18.tar.gz
324
+ 'inception_resnet_v2.tf_ens_adv_in1k': {
325
+ 'hf_hub_id': 'timm/',
326
+ 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
327
+ 'crop_pct': 0.8975, 'interpolation': 'bicubic',
328
+ 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
329
+ 'first_conv': 'conv2d_1a.conv', 'classifier': 'classif',
330
+ }
331
+ })
332
+
333
+
334
+ @register_model
335
+ def inception_resnet_v2(pretrained=False, **kwargs) -> InceptionResnetV2:
336
+ return _create_inception_resnet_v2('inception_resnet_v2', pretrained=pretrained, **kwargs)
337
+
338
+
339
+ register_model_deprecations(__name__, {
340
+ 'ens_adv_inception_resnet_v2': 'inception_resnet_v2.tf_ens_adv_in1k',
341
+ })
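A minimal usage sketch, assuming this timm build is installed; it exercises the pre-logits path and the classifier reset defined above:

    import timm
    import torch

    model = timm.create_model('inception_resnet_v2', pretrained=False).eval()
    x = torch.randn(1, 3, 299, 299)

    feats = model.forward_features(x)                   # (1, 1536, 8, 8)
    embed = model.forward_head(feats, pre_logits=True)  # (1, 1536) pooled features
    model.reset_classifier(num_classes=10)              # fresh 10-class head
    logits = model(x)                                   # (1, 10)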
pytorch-image-models/timm/models/inception_v3.py ADDED
@@ -0,0 +1,458 @@
1
+ """ Inception-V3
2
+
3
+ Originally from torchvision Inception3 model
4
+ Licensed BSD 3-Clause https://github.com/pytorch/vision/blob/master/LICENSE
5
+ """
6
+ from functools import partial
7
+ from typing import Optional
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+
13
+ from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
14
+ from timm.layers import trunc_normal_, create_classifier, Linear, ConvNormAct
15
+ from ._builder import build_model_with_cfg
16
+ from ._builder import resolve_pretrained_cfg
17
+ from ._manipulate import flatten_modules
18
+ from ._registry import register_model, generate_default_cfgs, register_model_deprecations
19
+
20
+ __all__ = ['InceptionV3'] # model_registry will add each entrypoint fn to this
21
+
22
+
23
+ class InceptionA(nn.Module):
24
+
25
+ def __init__(self, in_channels, pool_features, conv_block=None):
26
+ super(InceptionA, self).__init__()
27
+ conv_block = conv_block or ConvNormAct
28
+ self.branch1x1 = conv_block(in_channels, 64, kernel_size=1)
29
+
30
+ self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1)
31
+ self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2)
32
+
33
+ self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
34
+ self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
35
+ self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1)
36
+
37
+ self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1)
38
+
39
+ def _forward(self, x):
40
+ branch1x1 = self.branch1x1(x)
41
+
42
+ branch5x5 = self.branch5x5_1(x)
43
+ branch5x5 = self.branch5x5_2(branch5x5)
44
+
45
+ branch3x3dbl = self.branch3x3dbl_1(x)
46
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
47
+ branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
48
+
49
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
50
+ branch_pool = self.branch_pool(branch_pool)
51
+
52
+ outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
53
+ return outputs
54
+
55
+ def forward(self, x):
56
+ outputs = self._forward(x)
57
+ return torch.cat(outputs, 1)
58
+
59
+
60
+ class InceptionB(nn.Module):
61
+
62
+ def __init__(self, in_channels, conv_block=None):
63
+ super(InceptionB, self).__init__()
64
+ conv_block = conv_block or ConvNormAct
65
+ self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2)
66
+
67
+ self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1)
68
+ self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1)
69
+ self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2)
70
+
71
+ def _forward(self, x):
72
+ branch3x3 = self.branch3x3(x)
73
+
74
+ branch3x3dbl = self.branch3x3dbl_1(x)
75
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
76
+ branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
77
+
78
+ branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
79
+
80
+ outputs = [branch3x3, branch3x3dbl, branch_pool]
81
+ return outputs
82
+
83
+ def forward(self, x):
84
+ outputs = self._forward(x)
85
+ return torch.cat(outputs, 1)
86
+
87
+
88
+ class InceptionC(nn.Module):
89
+
90
+ def __init__(self, in_channels, channels_7x7, conv_block=None):
91
+ super(InceptionC, self).__init__()
92
+ conv_block = conv_block or ConvNormAct
93
+ self.branch1x1 = conv_block(in_channels, 192, kernel_size=1)
94
+
95
+ c7 = channels_7x7
96
+ self.branch7x7_1 = conv_block(in_channels, c7, kernel_size=1)
97
+ self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
98
+ self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0))
99
+
100
+ self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1)
101
+ self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
102
+ self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
103
+ self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
104
+ self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3))
105
+
106
+ self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
107
+
108
+ def _forward(self, x):
109
+ branch1x1 = self.branch1x1(x)
110
+
111
+ branch7x7 = self.branch7x7_1(x)
112
+ branch7x7 = self.branch7x7_2(branch7x7)
113
+ branch7x7 = self.branch7x7_3(branch7x7)
114
+
115
+ branch7x7dbl = self.branch7x7dbl_1(x)
116
+ branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
117
+ branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
118
+ branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
119
+ branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
120
+
121
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
122
+ branch_pool = self.branch_pool(branch_pool)
123
+
124
+ outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
125
+ return outputs
126
+
127
+ def forward(self, x):
128
+ outputs = self._forward(x)
129
+ return torch.cat(outputs, 1)
130
+
131
+
132
+ class InceptionD(nn.Module):
133
+
134
+ def __init__(self, in_channels, conv_block=None):
135
+ super(InceptionD, self).__init__()
136
+ conv_block = conv_block or ConvNormAct
137
+ self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1)
138
+ self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2)
139
+
140
+ self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1)
141
+ self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3))
142
+ self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0))
143
+ self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2)
144
+
145
+ def _forward(self, x):
146
+ branch3x3 = self.branch3x3_1(x)
147
+ branch3x3 = self.branch3x3_2(branch3x3)
148
+
149
+ branch7x7x3 = self.branch7x7x3_1(x)
150
+ branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
151
+ branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
152
+ branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
153
+
154
+ branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
155
+ outputs = [branch3x3, branch7x7x3, branch_pool]
156
+ return outputs
157
+
158
+ def forward(self, x):
159
+ outputs = self._forward(x)
160
+ return torch.cat(outputs, 1)
161
+
162
+
163
+ class InceptionE(nn.Module):
164
+
165
+ def __init__(self, in_channels, conv_block=None):
166
+ super(InceptionE, self).__init__()
167
+ conv_block = conv_block or ConvNormAct
168
+ self.branch1x1 = conv_block(in_channels, 320, kernel_size=1)
169
+
170
+ self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1)
171
+ self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
172
+ self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
173
+
174
+ self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1)
175
+ self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1)
176
+ self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1))
177
+ self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0))
178
+
179
+ self.branch_pool = conv_block(in_channels, 192, kernel_size=1)
180
+
181
+ def _forward(self, x):
182
+ branch1x1 = self.branch1x1(x)
183
+
184
+ branch3x3 = self.branch3x3_1(x)
185
+ branch3x3 = [
186
+ self.branch3x3_2a(branch3x3),
187
+ self.branch3x3_2b(branch3x3),
188
+ ]
189
+ branch3x3 = torch.cat(branch3x3, 1)
190
+
191
+ branch3x3dbl = self.branch3x3dbl_1(x)
192
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
193
+ branch3x3dbl = [
194
+ self.branch3x3dbl_3a(branch3x3dbl),
195
+ self.branch3x3dbl_3b(branch3x3dbl),
196
+ ]
197
+ branch3x3dbl = torch.cat(branch3x3dbl, 1)
198
+
199
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
200
+ branch_pool = self.branch_pool(branch_pool)
201
+
202
+ outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
203
+ return outputs
204
+
205
+ def forward(self, x):
206
+ outputs = self._forward(x)
207
+ return torch.cat(outputs, 1)
208
+
209
+
210
+ class InceptionAux(nn.Module):
211
+
212
+ def __init__(self, in_channels, num_classes, conv_block=None):
213
+ super(InceptionAux, self).__init__()
214
+ conv_block = conv_block or ConvNormAct
215
+ self.conv0 = conv_block(in_channels, 128, kernel_size=1)
216
+ self.conv1 = conv_block(128, 768, kernel_size=5)
217
+ self.conv1.stddev = 0.01
218
+ self.fc = Linear(768, num_classes)
219
+ self.fc.stddev = 0.001
220
+
221
+ def forward(self, x):
222
+ # N x 768 x 17 x 17
223
+ x = F.avg_pool2d(x, kernel_size=5, stride=3)
224
+ # N x 768 x 5 x 5
225
+ x = self.conv0(x)
226
+ # N x 128 x 5 x 5
227
+ x = self.conv1(x)
228
+ # N x 768 x 1 x 1
229
+ # Adaptive average pooling
230
+ x = F.adaptive_avg_pool2d(x, (1, 1))
231
+ # N x 768 x 1 x 1
232
+ x = torch.flatten(x, 1)
233
+ # N x 768
234
+ x = self.fc(x)
235
+ # N x 1000
236
+ return x
237
+
238
+
239
+ class InceptionV3(nn.Module):
240
+ """Inception-V3
241
+ """
242
+ aux_logits: torch.jit.Final[bool]
243
+
244
+ def __init__(
245
+ self,
246
+ num_classes=1000,
247
+ in_chans=3,
248
+ drop_rate=0.,
249
+ global_pool='avg',
250
+ aux_logits=False,
251
+ norm_layer='batchnorm2d',
252
+ norm_eps=1e-3,
253
+ act_layer='relu',
254
+ ):
255
+ super(InceptionV3, self).__init__()
256
+ self.num_classes = num_classes
257
+ self.aux_logits = aux_logits
258
+ conv_block = partial(
259
+ ConvNormAct,
260
+ padding=0,
261
+ norm_layer=norm_layer,
262
+ act_layer=act_layer,
263
+ norm_kwargs=dict(eps=norm_eps),
264
+ act_kwargs=dict(inplace=True),
265
+ )
266
+
267
+ self.Conv2d_1a_3x3 = conv_block(in_chans, 32, kernel_size=3, stride=2)
268
+ self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
269
+ self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
270
+ self.Pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
271
+ self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
272
+ self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
273
+ self.Pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
274
+ self.Mixed_5b = InceptionA(192, pool_features=32, conv_block=conv_block)
275
+ self.Mixed_5c = InceptionA(256, pool_features=64, conv_block=conv_block)
276
+ self.Mixed_5d = InceptionA(288, pool_features=64, conv_block=conv_block)
277
+ self.Mixed_6a = InceptionB(288, conv_block=conv_block)
278
+ self.Mixed_6b = InceptionC(768, channels_7x7=128, conv_block=conv_block)
279
+ self.Mixed_6c = InceptionC(768, channels_7x7=160, conv_block=conv_block)
280
+ self.Mixed_6d = InceptionC(768, channels_7x7=160, conv_block=conv_block)
281
+ self.Mixed_6e = InceptionC(768, channels_7x7=192, conv_block=conv_block)
282
+ if aux_logits:
283
+ self.AuxLogits = InceptionAux(768, num_classes, conv_block=conv_block)
284
+ else:
285
+ self.AuxLogits = None
286
+ self.Mixed_7a = InceptionD(768, conv_block=conv_block)
287
+ self.Mixed_7b = InceptionE(1280, conv_block=conv_block)
288
+ self.Mixed_7c = InceptionE(2048, conv_block=conv_block)
289
+ self.feature_info = [
290
+ dict(num_chs=64, reduction=2, module='Conv2d_2b_3x3'),
291
+ dict(num_chs=192, reduction=4, module='Conv2d_4a_3x3'),
292
+ dict(num_chs=288, reduction=8, module='Mixed_5d'),
293
+ dict(num_chs=768, reduction=16, module='Mixed_6e'),
294
+ dict(num_chs=2048, reduction=32, module='Mixed_7c'),
295
+ ]
296
+
297
+ self.num_features = self.head_hidden_size = 2048
298
+ self.global_pool, self.head_drop, self.fc = create_classifier(
299
+ self.num_features,
300
+ self.num_classes,
301
+ pool_type=global_pool,
302
+ drop_rate=drop_rate,
303
+ )
304
+
305
+ for m in self.modules():
306
+ if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
307
+ stddev = m.stddev if hasattr(m, 'stddev') else 0.1
308
+ trunc_normal_(m.weight, std=stddev)
309
+ elif isinstance(m, nn.BatchNorm2d):
310
+ nn.init.constant_(m.weight, 1)
311
+ nn.init.constant_(m.bias, 0)
312
+
313
+ @torch.jit.ignore
314
+ def group_matcher(self, coarse=False):
315
+ module_map = {k: i for i, (k, _) in enumerate(flatten_modules(self.named_children(), prefix=()))}
316
+ module_map.pop(('fc',))
317
+
318
+ def _matcher(name):
319
+ if any([name.startswith(n) for n in ('Conv2d_1', 'Conv2d_2')]):
320
+ return 0
321
+ elif any([name.startswith(n) for n in ('Conv2d_3', 'Conv2d_4')]):
322
+ return 1
323
+ else:
324
+ for k in module_map.keys():
325
+ if k == tuple(name.split('.')[:len(k)]):
326
+ return module_map[k]
327
+ return float('inf')
328
+ return _matcher
329
+
330
+ @torch.jit.ignore
331
+ def set_grad_checkpointing(self, enable=True):
332
+ assert not enable, 'gradient checkpointing not supported'
333
+
334
+ @torch.jit.ignore
335
+ def get_classifier(self) -> nn.Module:
336
+ return self.fc
337
+
338
+ def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
339
+ self.num_classes = num_classes
340
+ self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
341
+
342
+ def forward_preaux(self, x):
343
+ x = self.Conv2d_1a_3x3(x) # N x 32 x 149 x 149
344
+ x = self.Conv2d_2a_3x3(x) # N x 32 x 147 x 147
345
+ x = self.Conv2d_2b_3x3(x) # N x 64 x 147 x 147
346
+ x = self.Pool1(x) # N x 64 x 73 x 73
347
+ x = self.Conv2d_3b_1x1(x) # N x 80 x 73 x 73
348
+ x = self.Conv2d_4a_3x3(x) # N x 192 x 71 x 71
349
+ x = self.Pool2(x) # N x 192 x 35 x 35
350
+ x = self.Mixed_5b(x) # N x 256 x 35 x 35
351
+ x = self.Mixed_5c(x) # N x 288 x 35 x 35
352
+ x = self.Mixed_5d(x) # N x 288 x 35 x 35
353
+ x = self.Mixed_6a(x) # N x 768 x 17 x 17
354
+ x = self.Mixed_6b(x) # N x 768 x 17 x 17
355
+ x = self.Mixed_6c(x) # N x 768 x 17 x 17
356
+ x = self.Mixed_6d(x) # N x 768 x 17 x 17
357
+ x = self.Mixed_6e(x) # N x 768 x 17 x 17
358
+ return x
359
+
360
+ def forward_postaux(self, x):
361
+ x = self.Mixed_7a(x) # N x 1280 x 8 x 8
362
+ x = self.Mixed_7b(x) # N x 2048 x 8 x 8
363
+ x = self.Mixed_7c(x) # N x 2048 x 8 x 8
364
+ return x
365
+
366
+ def forward_features(self, x):
367
+ x = self.forward_preaux(x)
368
+ if self.aux_logits:
369
+ aux = self.AuxLogits(x)
370
+ x = self.forward_postaux(x)
371
+ return x, aux
372
+ x = self.forward_postaux(x)
373
+ return x
374
+
375
+ def forward_head(self, x, pre_logits: bool = False):
376
+ x = self.global_pool(x)
377
+ x = self.head_drop(x)
378
+ if pre_logits:
379
+ return x
380
+ x = self.fc(x)
381
+ return x
382
+
383
+ def forward(self, x):
384
+ if self.aux_logits:
385
+ x, aux = self.forward_features(x)
386
+ x = self.forward_head(x)
387
+ return x, aux
388
+ x = self.forward_features(x)
389
+ x = self.forward_head(x)
390
+ return x
391
+
392
+
393
+ def _create_inception_v3(variant, pretrained=False, **kwargs):
394
+ pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None))
395
+ aux_logits = kwargs.get('aux_logits', False)
396
+ has_aux_logits = False
397
+ if pretrained_cfg:
398
+ # only torchvision pretrained weights have aux logits
399
+ has_aux_logits = pretrained_cfg.tag == 'tv_in1k'
400
+ if aux_logits:
401
+ assert not kwargs.pop('features_only', False)
402
+ load_strict = has_aux_logits
403
+ else:
404
+ load_strict = not has_aux_logits
405
+
406
+ return build_model_with_cfg(
407
+ InceptionV3,
408
+ variant,
409
+ pretrained,
410
+ pretrained_cfg=pretrained_cfg,
411
+ pretrained_strict=load_strict,
412
+ **kwargs,
413
+ )
414
+
415
+
416
+ def _cfg(url='', **kwargs):
417
+ return {
418
+ 'url': url,
419
+ 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
420
+ 'crop_pct': 0.875, 'interpolation': 'bicubic',
421
+ 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
422
+ 'first_conv': 'Conv2d_1a_3x3.conv', 'classifier': 'fc',
423
+ **kwargs
424
+ }
425
+
426
+
427
+ default_cfgs = generate_default_cfgs({
428
+ # original PyTorch weights, ported from Tensorflow but modified
429
+ 'inception_v3.tv_in1k': _cfg(
430
+ # NOTE checkpoint has aux logit layer weights
431
+ hf_hub_id='timm/',
432
+ url='https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'),
433
+ # my port of Tensorflow SLIM weights (http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz)
434
+ 'inception_v3.tf_in1k': _cfg(hf_hub_id='timm/'),
435
+ # my port of Tensorflow adversarially trained Inception V3 from
436
+ # http://download.tensorflow.org/models/adv_inception_v3_2017_08_18.tar.gz
437
+ 'inception_v3.tf_adv_in1k': _cfg(hf_hub_id='timm/'),
438
+ # from gluon pretrained models, best performing in terms of accuracy/loss metrics
439
+ # https://gluon-cv.mxnet.io/model_zoo/classification.html
440
+ 'inception_v3.gluon_in1k': _cfg(
441
+ hf_hub_id='timm/',
442
+ mean=IMAGENET_DEFAULT_MEAN, # also works well with inception defaults
443
+ std=IMAGENET_DEFAULT_STD, # also works well with inception defaults
444
+ )
445
+ })
446
+
447
+
448
+ @register_model
449
+ def inception_v3(pretrained=False, **kwargs) -> InceptionV3:
450
+ model = _create_inception_v3('inception_v3', pretrained=pretrained, **kwargs)
451
+ return model
452
+
453
+
454
+ register_model_deprecations(__name__, {
455
+ 'tf_inception_v3': 'inception_v3.tf_in1k',
456
+ 'adv_inception_v3': 'inception_v3.tf_adv_in1k',
457
+ 'gluon_inception_v3': 'inception_v3.gluon_in1k',
458
+ })
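A minimal sketch of the aux-logits behaviour above, assuming this timm build is installed; with aux_logits=True the forward pass returns a (logits, aux_logits) pair:

    import timm
    import torch

    model = timm.create_model('inception_v3', pretrained=False, aux_logits=True).eval()
    x = torch.randn(2, 3, 299, 299)
    logits, aux = model(x)
    print(logits.shape, aux.shape)  # torch.Size([2, 1000]) torch.Size([2, 1000])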
pytorch-image-models/timm/models/inception_v4.py ADDED
@@ -0,0 +1,325 @@
1
+ """ Pytorch Inception-V4 implementation
2
+ Sourced from https://github.com/Cadene/tensorflow-model-zoo.torch (MIT License) which is
3
+ based upon Google's Tensorflow implementation and pretrained weights (Apache 2.0 License)
4
+ """
5
+ from functools import partial
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+ from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
11
+ from timm.layers import create_classifier, ConvNormAct
12
+ from ._builder import build_model_with_cfg
13
+ from ._registry import register_model, generate_default_cfgs
14
+
15
+ __all__ = ['InceptionV4']
16
+
17
+
18
+ class Mixed3a(nn.Module):
19
+ def __init__(self, conv_block=ConvNormAct):
20
+ super(Mixed3a, self).__init__()
21
+ self.maxpool = nn.MaxPool2d(3, stride=2)
22
+ self.conv = conv_block(64, 96, kernel_size=3, stride=2)
23
+
24
+ def forward(self, x):
25
+ x0 = self.maxpool(x)
26
+ x1 = self.conv(x)
27
+ out = torch.cat((x0, x1), 1)
28
+ return out
29
+
30
+
31
+ class Mixed4a(nn.Module):
32
+ def __init__(self, conv_block=ConvNormAct):
33
+ super(Mixed4a, self).__init__()
34
+
35
+ self.branch0 = nn.Sequential(
36
+ conv_block(160, 64, kernel_size=1, stride=1),
37
+ conv_block(64, 96, kernel_size=3, stride=1)
38
+ )
39
+
40
+ self.branch1 = nn.Sequential(
41
+ conv_block(160, 64, kernel_size=1, stride=1),
42
+ conv_block(64, 64, kernel_size=(1, 7), stride=1, padding=(0, 3)),
43
+ conv_block(64, 64, kernel_size=(7, 1), stride=1, padding=(3, 0)),
44
+ conv_block(64, 96, kernel_size=(3, 3), stride=1)
45
+ )
46
+
47
+ def forward(self, x):
48
+ x0 = self.branch0(x)
49
+ x1 = self.branch1(x)
50
+ out = torch.cat((x0, x1), 1)
51
+ return out
52
+
53
+
54
+ class Mixed5a(nn.Module):
55
+ def __init__(self, conv_block=ConvNormAct):
56
+ super(Mixed5a, self).__init__()
57
+ self.conv = conv_block(192, 192, kernel_size=3, stride=2)
58
+ self.maxpool = nn.MaxPool2d(3, stride=2)
59
+
60
+ def forward(self, x):
61
+ x0 = self.conv(x)
62
+ x1 = self.maxpool(x)
63
+ out = torch.cat((x0, x1), 1)
64
+ return out
65
+
66
+
67
+ class InceptionA(nn.Module):
68
+ def __init__(self, conv_block=ConvNormAct):
69
+ super(InceptionA, self).__init__()
70
+ self.branch0 = conv_block(384, 96, kernel_size=1, stride=1)
71
+
72
+ self.branch1 = nn.Sequential(
73
+ conv_block(384, 64, kernel_size=1, stride=1),
74
+ conv_block(64, 96, kernel_size=3, stride=1, padding=1)
75
+ )
76
+
77
+ self.branch2 = nn.Sequential(
78
+ conv_block(384, 64, kernel_size=1, stride=1),
79
+ conv_block(64, 96, kernel_size=3, stride=1, padding=1),
80
+ conv_block(96, 96, kernel_size=3, stride=1, padding=1)
81
+ )
82
+
83
+ self.branch3 = nn.Sequential(
84
+ nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
85
+ conv_block(384, 96, kernel_size=1, stride=1)
86
+ )
87
+
88
+ def forward(self, x):
89
+ x0 = self.branch0(x)
90
+ x1 = self.branch1(x)
91
+ x2 = self.branch2(x)
92
+ x3 = self.branch3(x)
93
+ out = torch.cat((x0, x1, x2, x3), 1)
94
+ return out
95
+
96
+
97
+ class ReductionA(nn.Module):
98
+ def __init__(self, conv_block=ConvNormAct):
99
+ super(ReductionA, self).__init__()
100
+ self.branch0 = conv_block(384, 384, kernel_size=3, stride=2)
101
+
102
+ self.branch1 = nn.Sequential(
103
+ conv_block(384, 192, kernel_size=1, stride=1),
104
+ conv_block(192, 224, kernel_size=3, stride=1, padding=1),
105
+ conv_block(224, 256, kernel_size=3, stride=2)
106
+ )
107
+
108
+ self.branch2 = nn.MaxPool2d(3, stride=2)
109
+
110
+ def forward(self, x):
111
+ x0 = self.branch0(x)
112
+ x1 = self.branch1(x)
113
+ x2 = self.branch2(x)
114
+ out = torch.cat((x0, x1, x2), 1)
115
+ return out
116
+
117
+
118
+ class InceptionB(nn.Module):
119
+ def __init__(self, conv_block=ConvNormAct):
120
+ super(InceptionB, self).__init__()
121
+ self.branch0 = conv_block(1024, 384, kernel_size=1, stride=1)
122
+
123
+ self.branch1 = nn.Sequential(
124
+ conv_block(1024, 192, kernel_size=1, stride=1),
125
+ conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
126
+ conv_block(224, 256, kernel_size=(7, 1), stride=1, padding=(3, 0))
127
+ )
128
+
129
+ self.branch2 = nn.Sequential(
130
+ conv_block(1024, 192, kernel_size=1, stride=1),
131
+ conv_block(192, 192, kernel_size=(7, 1), stride=1, padding=(3, 0)),
132
+ conv_block(192, 224, kernel_size=(1, 7), stride=1, padding=(0, 3)),
133
+ conv_block(224, 224, kernel_size=(7, 1), stride=1, padding=(3, 0)),
134
+ conv_block(224, 256, kernel_size=(1, 7), stride=1, padding=(0, 3))
135
+ )
136
+
137
+ self.branch3 = nn.Sequential(
138
+ nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
139
+ conv_block(1024, 128, kernel_size=1, stride=1)
140
+ )
141
+
142
+ def forward(self, x):
143
+ x0 = self.branch0(x)
144
+ x1 = self.branch1(x)
145
+ x2 = self.branch2(x)
146
+ x3 = self.branch3(x)
147
+ out = torch.cat((x0, x1, x2, x3), 1)
148
+ return out
149
+
150
+
151
+ class ReductionB(nn.Module):
152
+ def __init__(self, conv_block=ConvNormAct):
153
+ super(ReductionB, self).__init__()
154
+
155
+ self.branch0 = nn.Sequential(
156
+ conv_block(1024, 192, kernel_size=1, stride=1),
157
+ conv_block(192, 192, kernel_size=3, stride=2)
158
+ )
159
+
160
+ self.branch1 = nn.Sequential(
161
+ conv_block(1024, 256, kernel_size=1, stride=1),
162
+ conv_block(256, 256, kernel_size=(1, 7), stride=1, padding=(0, 3)),
163
+ conv_block(256, 320, kernel_size=(7, 1), stride=1, padding=(3, 0)),
164
+ conv_block(320, 320, kernel_size=3, stride=2)
165
+ )
166
+
167
+ self.branch2 = nn.MaxPool2d(3, stride=2)
168
+
169
+ def forward(self, x):
170
+ x0 = self.branch0(x)
171
+ x1 = self.branch1(x)
172
+ x2 = self.branch2(x)
173
+ out = torch.cat((x0, x1, x2), 1)
174
+ return out
175
+
176
+
177
+ class InceptionC(nn.Module):
178
+ def __init__(self, conv_block=ConvNormAct):
179
+ super(InceptionC, self).__init__()
180
+
181
+ self.branch0 = conv_block(1536, 256, kernel_size=1, stride=1)
182
+
183
+ self.branch1_0 = conv_block(1536, 384, kernel_size=1, stride=1)
184
+ self.branch1_1a = conv_block(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
185
+ self.branch1_1b = conv_block(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
186
+
187
+ self.branch2_0 = conv_block(1536, 384, kernel_size=1, stride=1)
188
+ self.branch2_1 = conv_block(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0))
189
+ self.branch2_2 = conv_block(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1))
190
+ self.branch2_3a = conv_block(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
191
+ self.branch2_3b = conv_block(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
192
+
193
+ self.branch3 = nn.Sequential(
194
+ nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),
195
+ conv_block(1536, 256, kernel_size=1, stride=1)
196
+ )
197
+
198
+ def forward(self, x):
199
+ x0 = self.branch0(x)
200
+
201
+ x1_0 = self.branch1_0(x)
202
+ x1_1a = self.branch1_1a(x1_0)
203
+ x1_1b = self.branch1_1b(x1_0)
204
+ x1 = torch.cat((x1_1a, x1_1b), 1)
205
+
206
+ x2_0 = self.branch2_0(x)
207
+ x2_1 = self.branch2_1(x2_0)
208
+ x2_2 = self.branch2_2(x2_1)
209
+ x2_3a = self.branch2_3a(x2_2)
210
+ x2_3b = self.branch2_3b(x2_2)
211
+ x2 = torch.cat((x2_3a, x2_3b), 1)
212
+
213
+ x3 = self.branch3(x)
214
+
215
+ out = torch.cat((x0, x1, x2, x3), 1)
216
+ return out
217
+
218
+
219
+ class InceptionV4(nn.Module):
220
+ def __init__(
221
+ self,
222
+ num_classes=1000,
223
+ in_chans=3,
224
+ output_stride=32,
225
+ drop_rate=0.,
226
+ global_pool='avg',
227
+ norm_layer='batchnorm2d',
228
+ norm_eps=1e-3,
229
+ act_layer='relu',
230
+ ):
231
+ super(InceptionV4, self).__init__()
232
+ assert output_stride == 32
233
+ self.num_classes = num_classes
234
+ self.num_features = self.head_hidden_size = 1536
235
+ conv_block = partial(
236
+ ConvNormAct,
237
+ padding=0,
238
+ norm_layer=norm_layer,
239
+ act_layer=act_layer,
240
+ norm_kwargs=dict(eps=norm_eps),
241
+ act_kwargs=dict(inplace=True),
242
+ )
243
+
244
+ features = [
245
+ conv_block(in_chans, 32, kernel_size=3, stride=2),
246
+ conv_block(32, 32, kernel_size=3, stride=1),
247
+ conv_block(32, 64, kernel_size=3, stride=1, padding=1),
248
+ Mixed3a(conv_block),
249
+ Mixed4a(conv_block),
250
+ Mixed5a(conv_block),
251
+ ]
252
+ features += [InceptionA(conv_block) for _ in range(4)]
253
+ features += [ReductionA(conv_block)] # Mixed6a
254
+ features += [InceptionB(conv_block) for _ in range(7)]
255
+ features += [ReductionB(conv_block)] # Mixed7a
256
+ features += [InceptionC(conv_block) for _ in range(3)]
257
+ self.features = nn.Sequential(*features)
258
+ self.feature_info = [
259
+ dict(num_chs=64, reduction=2, module='features.2'),
260
+ dict(num_chs=160, reduction=4, module='features.3'),
261
+ dict(num_chs=384, reduction=8, module='features.9'),
262
+ dict(num_chs=1024, reduction=16, module='features.17'),
263
+ dict(num_chs=1536, reduction=32, module='features.21'),
264
+ ]
265
+ self.global_pool, self.head_drop, self.last_linear = create_classifier(
266
+ self.num_features, self.num_classes, pool_type=global_pool, drop_rate=drop_rate)
267
+
268
+ @torch.jit.ignore
269
+ def group_matcher(self, coarse=False):
270
+ return dict(
271
+ stem=r'^features\.[012]\.',
272
+ blocks=r'^features\.(\d+)'
273
+ )
274
+
275
+ @torch.jit.ignore
276
+ def set_grad_checkpointing(self, enable=True):
277
+ assert not enable, 'gradient checkpointing not supported'
278
+
279
+ @torch.jit.ignore
280
+ def get_classifier(self) -> nn.Module:
281
+ return self.last_linear
282
+
283
+ def reset_classifier(self, num_classes: int, global_pool: str = 'avg'):
284
+ self.num_classes = num_classes
285
+ self.global_pool, self.last_linear = create_classifier(
286
+ self.num_features, self.num_classes, pool_type=global_pool)
287
+
288
+ def forward_features(self, x):
289
+ return self.features(x)
290
+
291
+ def forward_head(self, x, pre_logits: bool = False):
292
+ x = self.global_pool(x)
293
+ x = self.head_drop(x)
294
+ return x if pre_logits else self.last_linear(x)
295
+
296
+ def forward(self, x):
297
+ x = self.forward_features(x)
298
+ x = self.forward_head(x)
299
+ return x
300
+
301
+
302
+ def _create_inception_v4(variant, pretrained=False, **kwargs) -> InceptionV4:
303
+ return build_model_with_cfg(
304
+ InceptionV4,
305
+ variant,
306
+ pretrained,
307
+ feature_cfg=dict(flatten_sequential=True),
308
+ **kwargs,
309
+ )
310
+
311
+
312
+ default_cfgs = generate_default_cfgs({
313
+ 'inception_v4.tf_in1k': {
314
+ 'hf_hub_id': 'timm/',
315
+ 'num_classes': 1000, 'input_size': (3, 299, 299), 'pool_size': (8, 8),
316
+ 'crop_pct': 0.875, 'interpolation': 'bicubic',
317
+ 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
318
+ 'first_conv': 'features.0.conv', 'classifier': 'last_linear',
319
+ }
320
+ })
321
+
322
+
323
+ @register_model
324
+ def inception_v4(pretrained=False, **kwargs):
325
+ return _create_inception_v4('inception_v4', pretrained, **kwargs)
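A minimal usage sketch for the model registered above (editor's example, not part of the uploaded file): `inception_v4` is created through the standard `timm.create_model` entry point, and `features_only=True` draws on the `feature_info` entries declared in `InceptionV4.__init__`. Shapes assume the default 299x299 input from the pretrained config.

import torch
import timm

# Classification: 1000-class logits at the default 299x299 resolution.
model = timm.create_model('inception_v4', pretrained=False).eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 299, 299))   # torch.Size([1, 1000])

# Feature extraction: one map per feature_info entry (reductions 2/4/8/16/32).
feat_model = timm.create_model('inception_v4', pretrained=False, features_only=True).eval()
with torch.no_grad():
    feats = feat_model(torch.randn(1, 3, 299, 299))
print([f.shape for f in feats])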
pytorch-image-models/timm/models/layers/__init__.py ADDED
@@ -0,0 +1,48 @@
1
+ # NOTE timm.models.layers is DEPRECATED; please use timm.layers. This shim exists to reduce breakage during the transition.
2
+ from timm.layers.activations import *
3
+ from timm.layers.adaptive_avgmax_pool import \
4
+ adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
5
+ from timm.layers.attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding
6
+ from timm.layers.blur_pool import BlurPool2d
7
+ from timm.layers.classifier import ClassifierHead, create_classifier
8
+ from timm.layers.cond_conv2d import CondConv2d, get_condconv_initializer
9
+ from timm.layers.config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
10
+ set_layer_config
11
+ from timm.layers.conv2d_same import Conv2dSame, conv2d_same
12
+ from timm.layers.conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct
13
+ from timm.layers.create_act import create_act_layer, get_act_layer, get_act_fn
14
+ from timm.layers.create_attn import get_attn, create_attn
15
+ from timm.layers.create_conv2d import create_conv2d
16
+ from timm.layers.create_norm import get_norm_layer, create_norm_layer
17
+ from timm.layers.create_norm_act import get_norm_act_layer, create_norm_act_layer
18
+ from timm.layers.drop import DropBlock2d, DropPath, drop_block_2d, drop_path
19
+ from timm.layers.eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
20
+ from timm.layers.evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\
21
+ EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a
22
+ from timm.layers.fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm
23
+ from timm.layers.filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d
24
+ from timm.layers.gather_excite import GatherExcite
25
+ from timm.layers.global_context import GlobalContext
26
+ from timm.layers.helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple
27
+ from timm.layers.inplace_abn import InplaceAbn
28
+ from timm.layers.linear import Linear
29
+ from timm.layers.mixed_conv2d import MixedConv2d
30
+ from timm.layers.mlp import Mlp, GluMlp, GatedMlp, ConvMlp
31
+ from timm.layers.non_local_attn import NonLocalAttn, BatNonLocalAttn
32
+ from timm.layers.norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d
33
+ from timm.layers.norm_act import BatchNormAct2d, GroupNormAct, convert_sync_batchnorm
34
+ from timm.layers.padding import get_padding, get_same_padding, pad_same
35
+ from timm.layers.patch_embed import PatchEmbed
36
+ from timm.layers.pool2d_same import AvgPool2dSame, create_pool2d
37
+ from timm.layers.squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
38
+ from timm.layers.selective_kernel import SelectiveKernel
39
+ from timm.layers.separable_conv import SeparableConv2d, SeparableConvNormAct
40
+ from timm.layers.split_attn import SplitAttn
41
+ from timm.layers.split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
42
+ from timm.layers.std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
43
+ from timm.layers.test_time_pool import TestTimePoolHead, apply_test_time_pool
44
+ from timm.layers.trace_utils import _assert, _float_to_int
45
+ from timm.layers.weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_
46
+
47
+ import warnings
48
+ warnings.warn(f"Importing from {__name__} is deprecated, please import via timm.layers", FutureWarning)
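A short sketch of the shim's behavior (editor's example, not part of the file): the deprecated path still resolves every symbol but emits the FutureWarning above on first import, so migrating is just a matter of switching the import root.

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    # fires only on the first import of timm.models.layers in this process
    from timm.models.layers import ConvNormAct
print([str(w.message) for w in caught if issubclass(w.category, FutureWarning)])

# preferred import going forward
from timm.layers import ConvNormAct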
pytorch-image-models/timm/models/levit.py ADDED
@@ -0,0 +1,997 @@
1
+ """ LeViT
2
+
3
+ Paper: `LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference`
4
+ - https://arxiv.org/abs/2104.01136
5
+
6
+ @article{graham2021levit,
7
+ title={LeViT: a Vision Transformer in ConvNet's Clothing for Faster Inference},
8
+ author={Benjamin Graham and Alaaeldin El-Nouby and Hugo Touvron and Pierre Stock and Armand Joulin and Herv\'e J\'egou and Matthijs Douze},
9
+ journal={arXiv preprint arXiv:2104.01136},
10
+ year={2021}
11
+ }
12
+
13
+ Adapted from official impl at https://github.com/facebookresearch/LeViT, original copyright below.
14
+
15
+ This version combines both conv/linear models and fixes torchscript compatibility.
16
+
17
+ Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
18
+ """
19
+
20
+ # Copyright (c) 2015-present, Facebook, Inc.
21
+ # All rights reserved.
22
+
23
+ # Modified from
24
+ # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
25
+ # Copyright 2020 Ross Wightman, Apache-2.0 License
26
+ from collections import OrderedDict
27
+ from functools import partial
28
+ from typing import Dict, List, Optional, Tuple, Union
29
+
30
+ import torch
31
+ import torch.nn as nn
32
+
33
+ from timm.data import IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN
34
+ from timm.layers import to_ntuple, to_2tuple, get_act_layer, DropPath, trunc_normal_, ndgrid
35
+ from ._builder import build_model_with_cfg
36
+ from ._features import feature_take_indices
37
+ from ._manipulate import checkpoint_seq
38
+ from ._registry import generate_default_cfgs, register_model
39
+
40
+ __all__ = ['Levit']
41
+
42
+
43
+ class ConvNorm(nn.Module):
44
+ def __init__(
45
+ self, in_chs, out_chs, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bn_weight_init=1):
46
+ super().__init__()
47
+ self.linear = nn.Conv2d(in_chs, out_chs, kernel_size, stride, padding, dilation, groups, bias=False)
48
+ self.bn = nn.BatchNorm2d(out_chs)
49
+
50
+ nn.init.constant_(self.bn.weight, bn_weight_init)
51
+
52
+ @torch.no_grad()
53
+ def fuse(self):
54
+ c, bn = self.linear, self.bn
55
+ w = bn.weight / (bn.running_var + bn.eps) ** 0.5
56
+ w = c.weight * w[:, None, None, None]
57
+ b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
58
+ m = nn.Conv2d(
59
+ w.size(1), w.size(0), w.shape[2:], stride=self.linear.stride,
60
+ padding=self.linear.padding, dilation=self.linear.dilation, groups=self.linear.groups)
61
+ m.weight.data.copy_(w)
62
+ m.bias.data.copy_(b)
63
+ return m
64
+
65
+ def forward(self, x):
66
+ return self.bn(self.linear(x))
67
+
68
+
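The `fuse()` method above folds the BatchNorm into the convolution: w' = w * gamma / sqrt(var + eps) and b' = beta - mean * gamma / sqrt(var + eps). A quick numerical check of that identity (editor's sketch, assuming the class is importable as `timm.models.levit.ConvNorm`; eval mode, groups=1):

import torch
from timm.models.levit import ConvNorm

m = ConvNorm(8, 16, kernel_size=3, padding=1)
m.train()
for _ in range(3):                      # populate non-trivial BN running statistics
    m(torch.randn(4, 8, 14, 14))
m.eval()                                # fuse() is exact only against running stats

x = torch.randn(2, 8, 14, 14)
with torch.no_grad():
    fused = m.fuse()                    # plain nn.Conv2d with folded weight and bias
    assert torch.allclose(m(x), fused(x), atol=1e-5)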
69
+ class LinearNorm(nn.Module):
70
+ def __init__(self, in_features, out_features, bn_weight_init=1):
71
+ super().__init__()
72
+ self.linear = nn.Linear(in_features, out_features, bias=False)
73
+ self.bn = nn.BatchNorm1d(out_features)
74
+
75
+ nn.init.constant_(self.bn.weight, bn_weight_init)
76
+
77
+ @torch.no_grad()
78
+ def fuse(self):
79
+ l, bn = self.linear, self.bn
80
+ w = bn.weight / (bn.running_var + bn.eps) ** 0.5
81
+ w = l.weight * w[:, None]
82
+ b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
83
+ m = nn.Linear(w.size(1), w.size(0))
84
+ m.weight.data.copy_(w)
85
+ m.bias.data.copy_(b)
86
+ return m
87
+
88
+ def forward(self, x):
89
+ x = self.linear(x)
90
+ return self.bn(x.flatten(0, 1)).reshape_as(x)
91
+
92
+
93
+ class NormLinear(nn.Module):
94
+ def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.):
95
+ super().__init__()
96
+ self.bn = nn.BatchNorm1d(in_features)
97
+ self.drop = nn.Dropout(drop)
98
+ self.linear = nn.Linear(in_features, out_features, bias=bias)
99
+
100
+ trunc_normal_(self.linear.weight, std=std)
101
+ if self.linear.bias is not None:
102
+ nn.init.constant_(self.linear.bias, 0)
103
+
104
+ @torch.no_grad()
105
+ def fuse(self):
106
+ bn, l = self.bn, self.linear
107
+ w = bn.weight / (bn.running_var + bn.eps) ** 0.5
108
+ b = bn.bias - self.bn.running_mean * self.bn.weight / (bn.running_var + bn.eps) ** 0.5
109
+ w = l.weight * w[None, :]
110
+ if l.bias is None:
111
+ b = b @ self.linear.weight.T
112
+ else:
113
+ b = (l.weight @ b[:, None]).view(-1) + self.linear.bias
114
+ m = nn.Linear(w.size(1), w.size(0))
115
+ m.weight.data.copy_(w)
116
+ m.bias.data.copy_(b)
117
+ return m
118
+
119
+ def forward(self, x):
120
+ return self.linear(self.drop(self.bn(x)))
121
+
122
+
123
+ class Stem8(nn.Sequential):
124
+ def __init__(self, in_chs, out_chs, act_layer):
125
+ super().__init__()
126
+ self.stride = 8
127
+
128
+ self.add_module('conv1', ConvNorm(in_chs, out_chs // 4, 3, stride=2, padding=1))
129
+ self.add_module('act1', act_layer())
130
+ self.add_module('conv2', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1))
131
+ self.add_module('act2', act_layer())
132
+ self.add_module('conv3', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1))
133
+
134
+
135
+ class Stem16(nn.Sequential):
136
+ def __init__(self, in_chs, out_chs, act_layer):
137
+ super().__init__()
138
+ self.stride = 16
139
+
140
+ self.add_module('conv1', ConvNorm(in_chs, out_chs // 8, 3, stride=2, padding=1))
141
+ self.add_module('act1', act_layer())
142
+ self.add_module('conv2', ConvNorm(out_chs // 8, out_chs // 4, 3, stride=2, padding=1))
143
+ self.add_module('act2', act_layer())
144
+ self.add_module('conv3', ConvNorm(out_chs // 4, out_chs // 2, 3, stride=2, padding=1))
145
+ self.add_module('act3', act_layer())
146
+ self.add_module('conv4', ConvNorm(out_chs // 2, out_chs, 3, stride=2, padding=1))
147
+
148
+
149
+ class Downsample(nn.Module):
150
+ def __init__(self, stride, resolution, use_pool=False):
151
+ super().__init__()
152
+ self.stride = stride
153
+ self.resolution = to_2tuple(resolution)
154
+ self.pool = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False) if use_pool else None
155
+
156
+ def forward(self, x):
157
+ B, N, C = x.shape
158
+ x = x.view(B, self.resolution[0], self.resolution[1], C)
159
+ if self.pool is not None:
160
+ x = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
161
+ else:
162
+ x = x[:, ::self.stride, ::self.stride]
163
+ return x.reshape(B, -1, C)
164
+
165
+
166
+ class Attention(nn.Module):
167
+ attention_bias_cache: Dict[str, torch.Tensor]
168
+
169
+ def __init__(
170
+ self,
171
+ dim,
172
+ key_dim,
173
+ num_heads=8,
174
+ attn_ratio=4.,
175
+ resolution=14,
176
+ use_conv=False,
177
+ act_layer=nn.SiLU,
178
+ ):
179
+ super().__init__()
180
+ ln_layer = ConvNorm if use_conv else LinearNorm
181
+ resolution = to_2tuple(resolution)
182
+
183
+ self.use_conv = use_conv
184
+ self.num_heads = num_heads
185
+ self.scale = key_dim ** -0.5
186
+ self.key_dim = key_dim
187
+ self.key_attn_dim = key_dim * num_heads
188
+ self.val_dim = int(attn_ratio * key_dim)
189
+ self.val_attn_dim = int(attn_ratio * key_dim) * num_heads
190
+
191
+ self.qkv = ln_layer(dim, self.val_attn_dim + self.key_attn_dim * 2)
192
+ self.proj = nn.Sequential(OrderedDict([
193
+ ('act', act_layer()),
194
+ ('ln', ln_layer(self.val_attn_dim, dim, bn_weight_init=0))
195
+ ]))
196
+
197
+ self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1]))
198
+ pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1)
199
+ rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
200
+ rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1]
201
+ self.register_buffer('attention_bias_idxs', rel_pos, persistent=False)
202
+ self.attention_bias_cache = {}
203
+
204
+ @torch.no_grad()
205
+ def train(self, mode=True):
206
+ super().train(mode)
207
+ if mode and self.attention_bias_cache:
208
+ self.attention_bias_cache = {} # clear ab cache
209
+
210
+ def get_attention_biases(self, device: torch.device) -> torch.Tensor:
211
+ if torch.jit.is_tracing() or self.training:
212
+ return self.attention_biases[:, self.attention_bias_idxs]
213
+ else:
214
+ device_key = str(device)
215
+ if device_key not in self.attention_bias_cache:
216
+ self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
217
+ return self.attention_bias_cache[device_key]
218
+
219
+ def forward(self, x): # x (B,C,H,W)
220
+ if self.use_conv:
221
+ B, C, H, W = x.shape
222
+ q, k, v = self.qkv(x).view(
223
+ B, self.num_heads, -1, H * W).split([self.key_dim, self.key_dim, self.val_dim], dim=2)
224
+
225
+ attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device)
226
+ attn = attn.softmax(dim=-1)
227
+
228
+ x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W)
229
+ else:
230
+ B, N, C = x.shape
231
+ q, k, v = self.qkv(x).view(
232
+ B, N, self.num_heads, -1).split([self.key_dim, self.key_dim, self.val_dim], dim=3)
233
+ q = q.permute(0, 2, 1, 3)
234
+ k = k.permute(0, 2, 3, 1)
235
+ v = v.permute(0, 2, 1, 3)
236
+
237
+ attn = q @ k * self.scale + self.get_attention_biases(x.device)
238
+ attn = attn.softmax(dim=-1)
239
+
240
+ x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim)
241
+ x = self.proj(x)
242
+ return x
243
+
244
+
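The `attention_bias_idxs` buffer built in `Attention.__init__` (and again in `AttentionDownsample` below) maps every (query, key) position pair to one learned bias slot keyed by its absolute relative offset. A tiny standalone sketch of the same indexing for a 3x3 grid, using `torch.meshgrid(..., indexing='ij')` in place of timm's `ndgrid` helper:

import torch

res = (3, 3)
pos = torch.stack(torch.meshgrid(
    torch.arange(res[0]), torch.arange(res[1]), indexing='ij')).flatten(1)  # (2, 9)
rel = (pos[:, :, None] - pos[:, None, :]).abs()                             # (2, 9, 9)
idx = rel[0] * res[1] + rel[1]                                              # bias slot per q/k pair
print(idx.shape, int(idx.max()) + 1)    # torch.Size([9, 9]) 9  -> 9 shared slots for 81 pairs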
245
+ class AttentionDownsample(nn.Module):
246
+ attention_bias_cache: Dict[str, torch.Tensor]
247
+
248
+ def __init__(
249
+ self,
250
+ in_dim,
251
+ out_dim,
252
+ key_dim,
253
+ num_heads=8,
254
+ attn_ratio=2.0,
255
+ stride=2,
256
+ resolution=14,
257
+ use_conv=False,
258
+ use_pool=False,
259
+ act_layer=nn.SiLU,
260
+ ):
261
+ super().__init__()
262
+ resolution = to_2tuple(resolution)
263
+
264
+ self.stride = stride
265
+ self.resolution = resolution
266
+ self.num_heads = num_heads
267
+ self.key_dim = key_dim
268
+ self.key_attn_dim = key_dim * num_heads
269
+ self.val_dim = int(attn_ratio * key_dim)
270
+ self.val_attn_dim = self.val_dim * self.num_heads
271
+ self.scale = key_dim ** -0.5
272
+ self.use_conv = use_conv
273
+
274
+ if self.use_conv:
275
+ ln_layer = ConvNorm
276
+ sub_layer = partial(
277
+ nn.AvgPool2d,
278
+ kernel_size=3 if use_pool else 1, padding=1 if use_pool else 0, count_include_pad=False)
279
+ else:
280
+ ln_layer = LinearNorm
281
+ sub_layer = partial(Downsample, resolution=resolution, use_pool=use_pool)
282
+
283
+ self.kv = ln_layer(in_dim, self.val_attn_dim + self.key_attn_dim)
284
+ self.q = nn.Sequential(OrderedDict([
285
+ ('down', sub_layer(stride=stride)),
286
+ ('ln', ln_layer(in_dim, self.key_attn_dim))
287
+ ]))
288
+ self.proj = nn.Sequential(OrderedDict([
289
+ ('act', act_layer()),
290
+ ('ln', ln_layer(self.val_attn_dim, out_dim))
291
+ ]))
292
+
293
+ self.attention_biases = nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1]))
294
+ k_pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1)
295
+ q_pos = torch.stack(ndgrid(
296
+ torch.arange(0, resolution[0], step=stride),
297
+ torch.arange(0, resolution[1], step=stride)
298
+ )).flatten(1)
299
+ rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs()
300
+ rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1]
301
+ self.register_buffer('attention_bias_idxs', rel_pos, persistent=False)
302
+
303
+ self.attention_bias_cache = {} # per-device attention_biases cache
304
+
305
+ @torch.no_grad()
306
+ def train(self, mode=True):
307
+ super().train(mode)
308
+ if mode and self.attention_bias_cache:
309
+ self.attention_bias_cache = {} # clear ab cache
310
+
311
+ def get_attention_biases(self, device: torch.device) -> torch.Tensor:
312
+ if torch.jit.is_tracing() or self.training:
313
+ return self.attention_biases[:, self.attention_bias_idxs]
314
+ else:
315
+ device_key = str(device)
316
+ if device_key not in self.attention_bias_cache:
317
+ self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
318
+ return self.attention_bias_cache[device_key]
319
+
320
+ def forward(self, x):
321
+ if self.use_conv:
322
+ B, C, H, W = x.shape
323
+ HH, WW = (H - 1) // self.stride + 1, (W - 1) // self.stride + 1
324
+ k, v = self.kv(x).view(B, self.num_heads, -1, H * W).split([self.key_dim, self.val_dim], dim=2)
325
+ q = self.q(x).view(B, self.num_heads, self.key_dim, -1)
326
+
327
+ attn = (q.transpose(-2, -1) @ k) * self.scale + self.get_attention_biases(x.device)
328
+ attn = attn.softmax(dim=-1)
329
+
330
+ x = (v @ attn.transpose(-2, -1)).reshape(B, self.val_attn_dim, HH, WW)
331
+ else:
332
+ B, N, C = x.shape
333
+ k, v = self.kv(x).view(B, N, self.num_heads, -1).split([self.key_dim, self.val_dim], dim=3)
334
+ k = k.permute(0, 2, 3, 1) # BHCN
335
+ v = v.permute(0, 2, 1, 3) # BHNC
336
+ q = self.q(x).view(B, -1, self.num_heads, self.key_dim).permute(0, 2, 1, 3)
337
+
338
+ attn = q @ k * self.scale + self.get_attention_biases(x.device)
339
+ attn = attn.softmax(dim=-1)
340
+
341
+ x = (attn @ v).transpose(1, 2).reshape(B, -1, self.val_attn_dim)
342
+ x = self.proj(x)
343
+ return x
344
+
345
+
346
+ class LevitMlp(nn.Module):
347
+ """ MLP for Levit w/ normalization + ability to switch btw conv and linear
348
+ """
349
+ def __init__(
350
+ self,
351
+ in_features,
352
+ hidden_features=None,
353
+ out_features=None,
354
+ use_conv=False,
355
+ act_layer=nn.SiLU,
356
+ drop=0.
357
+ ):
358
+ super().__init__()
359
+ out_features = out_features or in_features
360
+ hidden_features = hidden_features or in_features
361
+ ln_layer = ConvNorm if use_conv else LinearNorm
362
+
363
+ self.ln1 = ln_layer(in_features, hidden_features)
364
+ self.act = act_layer()
365
+ self.drop = nn.Dropout(drop)
366
+ self.ln2 = ln_layer(hidden_features, out_features, bn_weight_init=0)
367
+
368
+ def forward(self, x):
369
+ x = self.ln1(x)
370
+ x = self.act(x)
371
+ x = self.drop(x)
372
+ x = self.ln2(x)
373
+ return x
374
+
375
+
376
+ class LevitDownsample(nn.Module):
377
+ def __init__(
378
+ self,
379
+ in_dim,
380
+ out_dim,
381
+ key_dim,
382
+ num_heads=8,
383
+ attn_ratio=4.,
384
+ mlp_ratio=2.,
385
+ act_layer=nn.SiLU,
386
+ attn_act_layer=None,
387
+ resolution=14,
388
+ use_conv=False,
389
+ use_pool=False,
390
+ drop_path=0.,
391
+ ):
392
+ super().__init__()
393
+ attn_act_layer = attn_act_layer or act_layer
394
+
395
+ self.attn_downsample = AttentionDownsample(
396
+ in_dim=in_dim,
397
+ out_dim=out_dim,
398
+ key_dim=key_dim,
399
+ num_heads=num_heads,
400
+ attn_ratio=attn_ratio,
401
+ act_layer=attn_act_layer,
402
+ resolution=resolution,
403
+ use_conv=use_conv,
404
+ use_pool=use_pool,
405
+ )
406
+
407
+ self.mlp = LevitMlp(
408
+ out_dim,
409
+ int(out_dim * mlp_ratio),
410
+ use_conv=use_conv,
411
+ act_layer=act_layer
412
+ )
413
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
414
+
415
+ def forward(self, x):
416
+ x = self.attn_downsample(x)
417
+ x = x + self.drop_path(self.mlp(x))
418
+ return x
419
+
420
+
421
+ class LevitBlock(nn.Module):
422
+ def __init__(
423
+ self,
424
+ dim,
425
+ key_dim,
426
+ num_heads=8,
427
+ attn_ratio=4.,
428
+ mlp_ratio=2.,
429
+ resolution=14,
430
+ use_conv=False,
431
+ act_layer=nn.SiLU,
432
+ attn_act_layer=None,
433
+ drop_path=0.,
434
+ ):
435
+ super().__init__()
436
+ attn_act_layer = attn_act_layer or act_layer
437
+
438
+ self.attn = Attention(
439
+ dim=dim,
440
+ key_dim=key_dim,
441
+ num_heads=num_heads,
442
+ attn_ratio=attn_ratio,
443
+ resolution=resolution,
444
+ use_conv=use_conv,
445
+ act_layer=attn_act_layer,
446
+ )
447
+ self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
448
+
449
+ self.mlp = LevitMlp(
450
+ dim,
451
+ int(dim * mlp_ratio),
452
+ use_conv=use_conv,
453
+ act_layer=act_layer
454
+ )
455
+ self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
456
+
457
+ def forward(self, x):
458
+ x = x + self.drop_path1(self.attn(x))
459
+ x = x + self.drop_path2(self.mlp(x))
460
+ return x
461
+
462
+
463
+ class LevitStage(nn.Module):
464
+ def __init__(
465
+ self,
466
+ in_dim,
467
+ out_dim,
468
+ key_dim,
469
+ depth=4,
470
+ num_heads=8,
471
+ attn_ratio=4.0,
472
+ mlp_ratio=4.0,
473
+ act_layer=nn.SiLU,
474
+ attn_act_layer=None,
475
+ resolution=14,
476
+ downsample='',
477
+ use_conv=False,
478
+ drop_path=0.,
479
+ ):
480
+ super().__init__()
481
+ resolution = to_2tuple(resolution)
482
+
483
+ if downsample:
484
+ self.downsample = LevitDownsample(
485
+ in_dim,
486
+ out_dim,
487
+ key_dim=key_dim,
488
+ num_heads=in_dim // key_dim,
489
+ attn_ratio=4.,
490
+ mlp_ratio=2.,
491
+ act_layer=act_layer,
492
+ attn_act_layer=attn_act_layer,
493
+ resolution=resolution,
494
+ use_conv=use_conv,
495
+ drop_path=drop_path,
496
+ )
497
+ resolution = [(r - 1) // 2 + 1 for r in resolution]
498
+ else:
499
+ assert in_dim == out_dim
500
+ self.downsample = nn.Identity()
501
+
502
+ blocks = []
503
+ for _ in range(depth):
504
+ blocks += [LevitBlock(
505
+ out_dim,
506
+ key_dim,
507
+ num_heads=num_heads,
508
+ attn_ratio=attn_ratio,
509
+ mlp_ratio=mlp_ratio,
510
+ act_layer=act_layer,
511
+ attn_act_layer=attn_act_layer,
512
+ resolution=resolution,
513
+ use_conv=use_conv,
514
+ drop_path=drop_path,
515
+ )]
516
+ self.blocks = nn.Sequential(*blocks)
517
+
518
+ def forward(self, x):
519
+ x = self.downsample(x)
520
+ x = self.blocks(x)
521
+ return x
522
+
523
+
524
+ class Levit(nn.Module):
525
+ """ Vision Transformer with support for patch or hybrid CNN input stage
526
+
527
+ NOTE: distillation defaults to True since the pretrained weights use it; this will cause problems
528
+ with train scripts that don't expect tuple outputs.
529
+ """
530
+
531
+ def __init__(
532
+ self,
533
+ img_size=224,
534
+ in_chans=3,
535
+ num_classes=1000,
536
+ embed_dim=(192,),
537
+ key_dim=64,
538
+ depth=(12,),
539
+ num_heads=(3,),
540
+ attn_ratio=2.,
541
+ mlp_ratio=2.,
542
+ stem_backbone=None,
543
+ stem_stride=None,
544
+ stem_type='s16',
545
+ down_op='subsample',
546
+ act_layer='hard_swish',
547
+ attn_act_layer=None,
548
+ use_conv=False,
549
+ global_pool='avg',
550
+ drop_rate=0.,
551
+ drop_path_rate=0.):
552
+ super().__init__()
553
+ act_layer = get_act_layer(act_layer)
554
+ attn_act_layer = get_act_layer(attn_act_layer or act_layer)
555
+ self.use_conv = use_conv
556
+ self.num_classes = num_classes
557
+ self.global_pool = global_pool
558
+ self.num_features = self.head_hidden_size = embed_dim[-1]
559
+ self.embed_dim = embed_dim
560
+ self.drop_rate = drop_rate
561
+ self.grad_checkpointing = False
562
+ self.feature_info = []
563
+
564
+ num_stages = len(embed_dim)
565
+ assert len(depth) == num_stages
566
+ num_heads = to_ntuple(num_stages)(num_heads)
567
+ attn_ratio = to_ntuple(num_stages)(attn_ratio)
568
+ mlp_ratio = to_ntuple(num_stages)(mlp_ratio)
569
+
570
+ if stem_backbone is not None:
571
+ assert stem_stride >= 2
572
+ self.stem = stem_backbone
573
+ stride = stem_stride
574
+ else:
575
+ assert stem_type in ('s16', 's8')
576
+ if stem_type == 's16':
577
+ self.stem = Stem16(in_chans, embed_dim[0], act_layer=act_layer)
578
+ else:
579
+ self.stem = Stem8(in_chans, embed_dim[0], act_layer=act_layer)
580
+ stride = self.stem.stride
581
+ resolution = tuple([i // p for i, p in zip(to_2tuple(img_size), to_2tuple(stride))])
582
+
583
+ in_dim = embed_dim[0]
584
+ stages = []
585
+ for i in range(num_stages):
586
+ stage_stride = 2 if i > 0 else 1
587
+ stages += [LevitStage(
588
+ in_dim,
589
+ embed_dim[i],
590
+ key_dim,
591
+ depth=depth[i],
592
+ num_heads=num_heads[i],
593
+ attn_ratio=attn_ratio[i],
594
+ mlp_ratio=mlp_ratio[i],
595
+ act_layer=act_layer,
596
+ attn_act_layer=attn_act_layer,
597
+ resolution=resolution,
598
+ use_conv=use_conv,
599
+ downsample=down_op if stage_stride == 2 else '',
600
+ drop_path=drop_path_rate
601
+ )]
602
+ stride *= stage_stride
603
+ resolution = tuple([(r - 1) // stage_stride + 1 for r in resolution])
604
+ self.feature_info += [dict(num_chs=embed_dim[i], reduction=stride, module=f'stages.{i}')]
605
+ in_dim = embed_dim[i]
606
+ self.stages = nn.Sequential(*stages)
607
+
608
+ # Classifier head
609
+ self.head = NormLinear(embed_dim[-1], num_classes, drop=drop_rate) if num_classes > 0 else nn.Identity()
610
+
611
+ @torch.jit.ignore
612
+ def no_weight_decay(self):
613
+ return {x for x in self.state_dict().keys() if 'attention_biases' in x}
614
+
615
+ @torch.jit.ignore
616
+ def group_matcher(self, coarse=False):
617
+ matcher = dict(
618
+ stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
619
+ blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
620
+ )
621
+ return matcher
622
+
623
+ @torch.jit.ignore
624
+ def set_grad_checkpointing(self, enable=True):
625
+ self.grad_checkpointing = enable
626
+
627
+ @torch.jit.ignore
628
+ def get_classifier(self) -> nn.Module:
629
+ return self.head
630
+
631
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
632
+ self.num_classes = num_classes
633
+ if global_pool is not None:
634
+ self.global_pool = global_pool
635
+ self.head = NormLinear(
636
+ self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity()
637
+
638
+ def forward_intermediates(
639
+ self,
640
+ x: torch.Tensor,
641
+ indices: Optional[Union[int, List[int]]] = None,
642
+ norm: bool = False,
643
+ stop_early: bool = False,
644
+ output_fmt: str = 'NCHW',
645
+ intermediates_only: bool = False,
646
+ ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
647
+ """ Forward features that returns intermediates.
648
+
649
+ Args:
650
+ x: Input image tensor
651
+ indices: Take last n blocks if int, all if None, select matching indices if sequence
652
+ norm: Apply norm layer to compatible intermediates
653
+ stop_early: Stop iterating over blocks when last desired intermediate hit
654
+ output_fmt: Shape of intermediate feature outputs
655
+ intermediates_only: Only return intermediate features
656
+ Returns:
657
+ List of intermediate feature tensors, or a (final_features, intermediates) tuple when intermediates_only is False.
658
+ """
659
+ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
660
+ intermediates = []
661
+ take_indices, max_index = feature_take_indices(len(self.stages), indices)
662
+
663
+ # forward pass
664
+ x = self.stem(x)
665
+ B, C, H, W = x.shape
666
+ if not self.use_conv:
667
+ x = x.flatten(2).transpose(1, 2)
668
+
669
+ if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
670
+ stages = self.stages
671
+ else:
672
+ stages = self.stages[:max_index + 1]
673
+ for feat_idx, stage in enumerate(stages):
674
+ x = stage(x)
675
+ if feat_idx in take_indices:
676
+ if self.use_conv:
677
+ intermediates.append(x)
678
+ else:
679
+ intermediates.append(x.reshape(B, H, W, -1).permute(0, 3, 1, 2))
680
+ H = (H + 2 - 1) // 2
681
+ W = (W + 2 - 1) // 2
682
+
683
+ if intermediates_only:
684
+ return intermediates
685
+
686
+ return x, intermediates
687
+
688
+ def prune_intermediate_layers(
689
+ self,
690
+ indices: Union[int, List[int]] = 1,
691
+ prune_norm: bool = False,
692
+ prune_head: bool = True,
693
+ ):
694
+ """ Prune layers not required for specified intermediates.
695
+ """
696
+ take_indices, max_index = feature_take_indices(len(self.stages), indices)
697
+ self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0
698
+ if prune_head:
699
+ self.reset_classifier(0, '')
700
+ return take_indices
701
+
702
+ def forward_features(self, x):
703
+ x = self.stem(x)
704
+ if not self.use_conv:
705
+ x = x.flatten(2).transpose(1, 2)
706
+ if self.grad_checkpointing and not torch.jit.is_scripting():
707
+ x = checkpoint_seq(self.stages, x)
708
+ else:
709
+ x = self.stages(x)
710
+ return x
711
+
712
+ def forward_head(self, x, pre_logits: bool = False):
713
+ if self.global_pool == 'avg':
714
+ x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1)
715
+ return x if pre_logits else self.head(x)
716
+
717
+ def forward(self, x):
718
+ x = self.forward_features(x)
719
+ x = self.forward_head(x)
720
+ return x
721
+
722
+
723
+ class LevitDistilled(Levit):
724
+ def __init__(self, *args, **kwargs):
725
+ super().__init__(*args, **kwargs)
726
+ self.head_dist = NormLinear(self.num_features, self.num_classes) if self.num_classes > 0 else nn.Identity()
727
+ self.distilled_training = False # must set this True to train w/ distillation token
728
+
729
+ @torch.jit.ignore
730
+ def get_classifier(self) -> nn.Module:
731
+ return self.head, self.head_dist
732
+
733
+ def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
734
+ self.num_classes = num_classes
735
+ if global_pool is not None:
736
+ self.global_pool = global_pool
737
+ self.head = NormLinear(
738
+ self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else nn.Identity()
739
+ self.head_dist = NormLinear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
740
+
741
+ @torch.jit.ignore
742
+ def set_distilled_training(self, enable=True):
743
+ self.distilled_training = enable
744
+
745
+ def forward_head(self, x, pre_logits: bool = False):
746
+ if self.global_pool == 'avg':
747
+ x = x.mean(dim=(-2, -1)) if self.use_conv else x.mean(dim=1)
748
+ if pre_logits:
749
+ return x
750
+ x, x_dist = self.head(x), self.head_dist(x)
751
+ if self.distilled_training and self.training and not torch.jit.is_scripting():
752
+ # only return separate classification predictions when training in distilled mode
753
+ return x, x_dist
754
+ else:
755
+ # during standard train/finetune and at inference, average the two classifier predictions
756
+ return (x + x_dist) / 2
757
+
758
+
759
+ def checkpoint_filter_fn(state_dict, model):
760
+ if 'model' in state_dict:
761
+ state_dict = state_dict['model']
762
+
763
+ # filter out attn biases, should not have been persistent
764
+ state_dict = {k: v for k, v in state_dict.items() if 'attention_bias_idxs' not in k}
765
+
766
+ D = model.state_dict()
767
+ out_dict = {}
768
+ for ka, kb, va, vb in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()):
769
+ if va.ndim == 4 and vb.ndim == 2:
770
+ vb = vb[:, :, None, None]
771
+ if va.shape != vb.shape:
772
+ # head or first-conv shapes may change for fine-tune
773
+ assert 'head' in ka or 'stem.conv1.linear' in ka
774
+ out_dict[ka] = vb
775
+
776
+ return out_dict
777
+
778
+
779
+ model_cfgs = dict(
780
+ levit_128s=dict(
781
+ embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 6, 8), depth=(2, 3, 4)),
782
+ levit_128=dict(
783
+ embed_dim=(128, 256, 384), key_dim=16, num_heads=(4, 8, 12), depth=(4, 4, 4)),
784
+ levit_192=dict(
785
+ embed_dim=(192, 288, 384), key_dim=32, num_heads=(3, 5, 6), depth=(4, 4, 4)),
786
+ levit_256=dict(
787
+ embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 4, 4)),
788
+ levit_384=dict(
789
+ embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4)),
790
+
791
+ # stride-8 stem experiments
792
+ levit_384_s8=dict(
793
+ embed_dim=(384, 512, 768), key_dim=32, num_heads=(6, 9, 12), depth=(4, 4, 4),
794
+ act_layer='silu', stem_type='s8'),
795
+ levit_512_s8=dict(
796
+ embed_dim=(512, 640, 896), key_dim=64, num_heads=(8, 10, 14), depth=(4, 4, 4),
797
+ act_layer='silu', stem_type='s8'),
798
+
799
+ # wider experiments
800
+ levit_512=dict(
801
+ embed_dim=(512, 768, 1024), key_dim=64, num_heads=(8, 12, 16), depth=(4, 4, 4), act_layer='silu'),
802
+
803
+ # deeper experiments
804
+ levit_256d=dict(
805
+ embed_dim=(256, 384, 512), key_dim=32, num_heads=(4, 6, 8), depth=(4, 8, 6), act_layer='silu'),
806
+ levit_512d=dict(
807
+ embed_dim=(512, 640, 768), key_dim=64, num_heads=(8, 10, 12), depth=(4, 8, 6), act_layer='silu'),
808
+ )
809
+
810
+
811
+ def create_levit(variant, cfg_variant=None, pretrained=False, distilled=True, **kwargs):
812
+ is_conv = '_conv' in variant
813
+ out_indices = kwargs.pop('out_indices', (0, 1, 2))
814
+ if kwargs.get('features_only', False) and not is_conv:
815
+ kwargs.setdefault('feature_cls', 'getter')
816
+ if cfg_variant is None:
817
+ if variant in model_cfgs:
818
+ cfg_variant = variant
819
+ elif is_conv:
820
+ cfg_variant = variant.replace('_conv', '')
821
+
822
+ model_cfg = dict(model_cfgs[cfg_variant], **kwargs)
823
+ model = build_model_with_cfg(
824
+ LevitDistilled if distilled else Levit,
825
+ variant,
826
+ pretrained,
827
+ pretrained_filter_fn=checkpoint_filter_fn,
828
+ feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
829
+ **model_cfg,
830
+ )
831
+ return model
832
+
833
+
834
+ def _cfg(url='', **kwargs):
835
+ return {
836
+ 'url': url,
837
+ 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
838
+ 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
839
+ 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
840
+ 'first_conv': 'stem.conv1.linear', 'classifier': ('head.linear', 'head_dist.linear'),
841
+ **kwargs
842
+ }
843
+
844
+
845
+ default_cfgs = generate_default_cfgs({
846
+ # weights in nn.Linear mode
847
+ 'levit_128s.fb_dist_in1k': _cfg(
848
+ hf_hub_id='timm/',
849
+ ),
850
+ 'levit_128.fb_dist_in1k': _cfg(
851
+ hf_hub_id='timm/',
852
+ ),
853
+ 'levit_192.fb_dist_in1k': _cfg(
854
+ hf_hub_id='timm/',
855
+ ),
856
+ 'levit_256.fb_dist_in1k': _cfg(
857
+ hf_hub_id='timm/',
858
+ ),
859
+ 'levit_384.fb_dist_in1k': _cfg(
860
+ hf_hub_id='timm/',
861
+ ),
862
+
863
+ # weights in nn.Conv2d mode
864
+ 'levit_conv_128s.fb_dist_in1k': _cfg(
865
+ hf_hub_id='timm/',
866
+ pool_size=(4, 4),
867
+ ),
868
+ 'levit_conv_128.fb_dist_in1k': _cfg(
869
+ hf_hub_id='timm/',
870
+ pool_size=(4, 4),
871
+ ),
872
+ 'levit_conv_192.fb_dist_in1k': _cfg(
873
+ hf_hub_id='timm/',
874
+ pool_size=(4, 4),
875
+ ),
876
+ 'levit_conv_256.fb_dist_in1k': _cfg(
877
+ hf_hub_id='timm/',
878
+ pool_size=(4, 4),
879
+ ),
880
+ 'levit_conv_384.fb_dist_in1k': _cfg(
881
+ hf_hub_id='timm/',
882
+ pool_size=(4, 4),
883
+ ),
884
+
885
+ 'levit_384_s8.untrained': _cfg(classifier='head.linear'),
886
+ 'levit_512_s8.untrained': _cfg(classifier='head.linear'),
887
+ 'levit_512.untrained': _cfg(classifier='head.linear'),
888
+ 'levit_256d.untrained': _cfg(classifier='head.linear'),
889
+ 'levit_512d.untrained': _cfg(classifier='head.linear'),
890
+
891
+ 'levit_conv_384_s8.untrained': _cfg(classifier='head.linear'),
892
+ 'levit_conv_512_s8.untrained': _cfg(classifier='head.linear'),
893
+ 'levit_conv_512.untrained': _cfg(classifier='head.linear'),
894
+ 'levit_conv_256d.untrained': _cfg(classifier='head.linear'),
895
+ 'levit_conv_512d.untrained': _cfg(classifier='head.linear'),
896
+ })
897
+
898
+
899
+ @register_model
900
+ def levit_128s(pretrained=False, **kwargs) -> Levit:
901
+ return create_levit('levit_128s', pretrained=pretrained, **kwargs)
902
+
903
+
904
+ @register_model
905
+ def levit_128(pretrained=False, **kwargs) -> Levit:
906
+ return create_levit('levit_128', pretrained=pretrained, **kwargs)
907
+
908
+
909
+ @register_model
910
+ def levit_192(pretrained=False, **kwargs) -> Levit:
911
+ return create_levit('levit_192', pretrained=pretrained, **kwargs)
912
+
913
+
914
+ @register_model
915
+ def levit_256(pretrained=False, **kwargs) -> Levit:
916
+ return create_levit('levit_256', pretrained=pretrained, **kwargs)
917
+
918
+
919
+ @register_model
920
+ def levit_384(pretrained=False, **kwargs) -> Levit:
921
+ return create_levit('levit_384', pretrained=pretrained, **kwargs)
922
+
923
+
924
+ @register_model
925
+ def levit_384_s8(pretrained=False, **kwargs) -> Levit:
926
+ return create_levit('levit_384_s8', pretrained=pretrained, **kwargs)
927
+
928
+
929
+ @register_model
930
+ def levit_512_s8(pretrained=False, **kwargs) -> Levit:
931
+ return create_levit('levit_512_s8', pretrained=pretrained, distilled=False, **kwargs)
932
+
933
+
934
+ @register_model
935
+ def levit_512(pretrained=False, **kwargs) -> Levit:
936
+ return create_levit('levit_512', pretrained=pretrained, distilled=False, **kwargs)
937
+
938
+
939
+ @register_model
940
+ def levit_256d(pretrained=False, **kwargs) -> Levit:
941
+ return create_levit('levit_256d', pretrained=pretrained, distilled=False, **kwargs)
942
+
943
+
944
+ @register_model
945
+ def levit_512d(pretrained=False, **kwargs) -> Levit:
946
+ return create_levit('levit_512d', pretrained=pretrained, distilled=False, **kwargs)
947
+
948
+
949
+ @register_model
950
+ def levit_conv_128s(pretrained=False, **kwargs) -> Levit:
951
+ return create_levit('levit_conv_128s', pretrained=pretrained, use_conv=True, **kwargs)
952
+
953
+
954
+ @register_model
955
+ def levit_conv_128(pretrained=False, **kwargs) -> Levit:
956
+ return create_levit('levit_conv_128', pretrained=pretrained, use_conv=True, **kwargs)
957
+
958
+
959
+ @register_model
960
+ def levit_conv_192(pretrained=False, **kwargs) -> Levit:
961
+ return create_levit('levit_conv_192', pretrained=pretrained, use_conv=True, **kwargs)
962
+
963
+
964
+ @register_model
965
+ def levit_conv_256(pretrained=False, **kwargs) -> Levit:
966
+ return create_levit('levit_conv_256', pretrained=pretrained, use_conv=True, **kwargs)
967
+
968
+
969
+ @register_model
970
+ def levit_conv_384(pretrained=False, **kwargs) -> Levit:
971
+ return create_levit('levit_conv_384', pretrained=pretrained, use_conv=True, **kwargs)
972
+
973
+
974
+ @register_model
975
+ def levit_conv_384_s8(pretrained=False, **kwargs) -> Levit:
976
+ return create_levit('levit_conv_384_s8', pretrained=pretrained, use_conv=True, **kwargs)
977
+
978
+
979
+ @register_model
980
+ def levit_conv_512_s8(pretrained=False, **kwargs) -> Levit:
981
+ return create_levit('levit_conv_512_s8', pretrained=pretrained, use_conv=True, distilled=False, **kwargs)
982
+
983
+
984
+ @register_model
985
+ def levit_conv_512(pretrained=False, **kwargs) -> Levit:
986
+ return create_levit('levit_conv_512', pretrained=pretrained, use_conv=True, distilled=False, **kwargs)
987
+
988
+
989
+ @register_model
990
+ def levit_conv_256d(pretrained=False, **kwargs) -> Levit:
991
+ return create_levit('levit_conv_256d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs)
992
+
993
+
994
+ @register_model
995
+ def levit_conv_512d(pretrained=False, **kwargs) -> Levit:
996
+ return create_levit('levit_conv_512d', pretrained=pretrained, use_conv=True, distilled=False, **kwargs)
997
+
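As flagged in the `Levit` class docstring, the pretrained variants use a distillation head, so the output type depends on the training mode. A brief sketch (editor's example, assuming timm is installed):

import torch
import timm

model = timm.create_model('levit_128s', pretrained=False)
x = torch.randn(2, 3, 224, 224)

model.eval()
with torch.no_grad():
    out = model(x)                      # averaged cls/dist logits, torch.Size([2, 1000])

model.set_distilled_training(True)      # opt in to separate heads for distillation training
model.train()
cls_logits, dist_logits = model(x)      # tuple output; plain train scripts must handle this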
pytorch-image-models/timm/models/mambaout.py ADDED
@@ -0,0 +1,642 @@
1
+ """
2
+ MambaOut models for image classification.
3
+ Some implementations are modified from:
4
+ timm (https://github.com/rwightman/pytorch-image-models),
5
+ MetaFormer (https://github.com/sail-sg/metaformer),
6
+ InceptionNeXt (https://github.com/sail-sg/inceptionnext)
7
+ """
8
+ from collections import OrderedDict
9
+ from typing import Optional
10
+
11
+ import torch
12
+ from torch import nn
13
+
14
+ from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
15
+ from timm.layers import trunc_normal_, DropPath, LayerNorm, LayerScale, ClNormMlpClassifierHead, get_act_layer
16
+ from ._builder import build_model_with_cfg
17
+ from ._manipulate import checkpoint_seq
18
+ from ._registry import register_model, generate_default_cfgs
19
+
20
+
21
+ class Stem(nn.Module):
22
+ r""" Code modified from InternImage:
23
+ https://github.com/OpenGVLab/InternImage
24
+ """
25
+
26
+ def __init__(
27
+ self,
28
+ in_chs=3,
29
+ out_chs=96,
30
+ mid_norm: bool = True,
31
+ act_layer=nn.GELU,
32
+ norm_layer=LayerNorm,
33
+ ):
34
+ super().__init__()
35
+ self.conv1 = nn.Conv2d(
36
+ in_chs,
37
+ out_chs // 2,
38
+ kernel_size=3,
39
+ stride=2,
40
+ padding=1
41
+ )
42
+ self.norm1 = norm_layer(out_chs // 2) if mid_norm else None
43
+ self.act = act_layer()
44
+ self.conv2 = nn.Conv2d(
45
+ out_chs // 2,
46
+ out_chs,
47
+ kernel_size=3,
48
+ stride=2,
49
+ padding=1
50
+ )
51
+ self.norm2 = norm_layer(out_chs)
52
+
53
+ def forward(self, x):
54
+ x = self.conv1(x)
55
+ if self.norm1 is not None:
56
+ x = x.permute(0, 2, 3, 1)
57
+ x = self.norm1(x)
58
+ x = x.permute(0, 3, 1, 2)
59
+ x = self.act(x)
60
+ x = self.conv2(x)
61
+ x = x.permute(0, 2, 3, 1)
62
+ x = self.norm2(x)
63
+ return x
64
+
65
+
66
+ class DownsampleNormFirst(nn.Module):
67
+
68
+ def __init__(
69
+ self,
70
+ in_chs=96,
71
+ out_chs=198,
72
+ norm_layer=LayerNorm,
73
+ ):
74
+ super().__init__()
75
+ self.norm = norm_layer(in_chs)
76
+ self.conv = nn.Conv2d(
77
+ in_chs,
78
+ out_chs,
79
+ kernel_size=3,
80
+ stride=2,
81
+ padding=1
82
+ )
83
+
84
+ def forward(self, x):
85
+ x = self.norm(x)
86
+ x = x.permute(0, 3, 1, 2)
87
+ x = self.conv(x)
88
+ x = x.permute(0, 2, 3, 1)
89
+ return x
90
+
91
+
92
+ class Downsample(nn.Module):
93
+
94
+ def __init__(
95
+ self,
96
+ in_chs=96,
97
+ out_chs=198,
98
+ norm_layer=LayerNorm,
99
+ ):
100
+ super().__init__()
101
+ self.conv = nn.Conv2d(
102
+ in_chs,
103
+ out_chs,
104
+ kernel_size=3,
105
+ stride=2,
106
+ padding=1
107
+ )
108
+ self.norm = norm_layer(out_chs)
109
+
110
+ def forward(self, x):
111
+ x = x.permute(0, 3, 1, 2)
112
+ x = self.conv(x)
113
+ x = x.permute(0, 2, 3, 1)
114
+ x = self.norm(x)
115
+ return x
116
+
117
+
118
+ class MlpHead(nn.Module):
119
+ """ MLP classification head
120
+ """
121
+
122
+ def __init__(
123
+ self,
124
+ in_features,
125
+ num_classes=1000,
126
+ pool_type='avg',
127
+ act_layer=nn.GELU,
128
+ mlp_ratio=4,
129
+ norm_layer=LayerNorm,
130
+ drop_rate=0.,
131
+ bias=True,
132
+ ):
133
+ super().__init__()
134
+ if mlp_ratio is not None:
135
+ hidden_size = int(mlp_ratio * in_features)
136
+ else:
137
+ hidden_size = None
138
+ self.pool_type = pool_type
139
+ self.in_features = in_features
140
+ self.hidden_size = hidden_size or in_features
141
+
142
+ self.norm = norm_layer(in_features)
143
+ if hidden_size:
144
+ self.pre_logits = nn.Sequential(OrderedDict([
145
+ ('fc', nn.Linear(in_features, hidden_size)),
146
+ ('act', act_layer()),
147
+ ('norm', norm_layer(hidden_size))
148
+ ]))
149
+ self.num_features = hidden_size
150
+ else:
151
+ self.num_features = in_features
152
+ self.pre_logits = nn.Identity()
153
+
154
+ self.fc = nn.Linear(self.num_features, num_classes, bias=bias) if num_classes > 0 else nn.Identity()
155
+ self.head_dropout = nn.Dropout(drop_rate)
156
+
157
+ def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False):
158
+ if pool_type is not None:
159
+ self.pool_type = pool_type
160
+ if reset_other:
161
+ self.norm = nn.Identity()
162
+ self.pre_logits = nn.Identity()
163
+ self.num_features = self.in_features
164
+ self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
165
+
166
+ def forward(self, x, pre_logits: bool = False):
167
+ if self.pool_type == 'avg':
168
+ x = x.mean((1, 2))
169
+ x = self.norm(x)
170
+ x = self.pre_logits(x)
171
+ x = self.head_dropout(x)
172
+ if pre_logits:
173
+ return x
174
+ x = self.fc(x)
175
+ return x
176
+
177
+
178
+ class GatedConvBlock(nn.Module):
179
+ r""" Our implementation of Gated CNN Block: https://arxiv.org/pdf/1612.08083
180
+ Args:
181
+ conv_ratio: control the number of channels to conduct depthwise convolution.
182
+ Conduct convolution on partial channels can improve paraitcal efficiency.
183
+ The idea of partial channels is from ShuffleNet V2 (https://arxiv.org/abs/1807.11164) and
184
+ also used by InceptionNeXt (https://arxiv.org/abs/2303.16900) and FasterNet (https://arxiv.org/abs/2303.03667)
185
+ """
186
+
187
+ def __init__(
188
+ self,
189
+ dim,
190
+ expansion_ratio=8 / 3,
191
+ kernel_size=7,
192
+ conv_ratio=1.0,
193
+ ls_init_value=None,
194
+ norm_layer=LayerNorm,
195
+ act_layer=nn.GELU,
196
+ drop_path=0.,
197
+ **kwargs
198
+ ):
199
+ super().__init__()
200
+ self.norm = norm_layer(dim)
201
+ hidden = int(expansion_ratio * dim)
202
+ self.fc1 = nn.Linear(dim, hidden * 2)
203
+ self.act = act_layer()
204
+ conv_channels = int(conv_ratio * dim)
205
+ self.split_indices = (hidden, hidden - conv_channels, conv_channels)
206
+ self.conv = nn.Conv2d(
207
+ conv_channels,
208
+ conv_channels,
209
+ kernel_size=kernel_size,
210
+ padding=kernel_size // 2,
211
+ groups=conv_channels
212
+ )
213
+ self.fc2 = nn.Linear(hidden, dim)
214
+ self.ls = LayerScale(dim) if ls_init_value is not None else nn.Identity()
215
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
216
+
217
+ def forward(self, x):
218
+ shortcut = x # [B, H, W, C]
219
+ x = self.norm(x)
220
+ x = self.fc1(x)
221
+ g, i, c = torch.split(x, self.split_indices, dim=-1)
222
+ c = c.permute(0, 3, 1, 2) # [B, H, W, C] -> [B, C, H, W]
223
+ c = self.conv(c)
224
+ c = c.permute(0, 2, 3, 1) # [B, C, H, W] -> [B, H, W, C]
225
+ x = self.fc2(self.act(g) * torch.cat((i, c), dim=-1))
226
+ x = self.ls(x)
227
+ x = self.drop_path(x)
228
+ return x + shortcut
229
+
230
+
231
+ class MambaOutStage(nn.Module):
232
+
233
+ def __init__(
234
+ self,
235
+ dim,
236
+ dim_out: Optional[int] = None,
237
+ depth: int = 4,
238
+ expansion_ratio=8 / 3,
239
+ kernel_size=7,
240
+ conv_ratio=1.0,
241
+ downsample: str = '',
242
+ ls_init_value: Optional[float] = None,
243
+ norm_layer=LayerNorm,
244
+ act_layer=nn.GELU,
245
+ drop_path=0.,
246
+ ):
247
+ super().__init__()
248
+ dim_out = dim_out or dim
249
+ self.grad_checkpointing = False
250
+
251
+ if downsample == 'conv':
252
+ self.downsample = Downsample(dim, dim_out, norm_layer=norm_layer)
253
+ elif downsample == 'conv_nf':
254
+ self.downsample = DownsampleNormFirst(dim, dim_out, norm_layer=norm_layer)
255
+ else:
256
+ assert dim == dim_out
257
+ self.downsample = nn.Identity()
258
+
259
+ self.blocks = nn.Sequential(*[
260
+ GatedConvBlock(
261
+ dim=dim_out,
262
+ expansion_ratio=expansion_ratio,
263
+ kernel_size=kernel_size,
264
+ conv_ratio=conv_ratio,
265
+ ls_init_value=ls_init_value,
266
+ norm_layer=norm_layer,
267
+ act_layer=act_layer,
268
+ drop_path=drop_path[j] if isinstance(drop_path, (list, tuple)) else drop_path,
269
+ )
270
+ for j in range(depth)
271
+ ])
272
+
273
+ def forward(self, x):
274
+ x = self.downsample(x)
275
+ if self.grad_checkpointing and not torch.jit.is_scripting():
276
+ x = checkpoint_seq(self.blocks, x)
277
+ else:
278
+ x = self.blocks(x)
279
+ return x
280
+
281
+
282
+ class MambaOut(nn.Module):
283
+ r""" MetaFormer
284
+ A PyTorch impl of : `MetaFormer Baselines for Vision` -
285
+ https://arxiv.org/abs/2210.13452
286
+
287
+ Args:
288
+ in_chans (int): Number of input image channels. Default: 3.
289
+ num_classes (int): Number of classes for classification head. Default: 1000.
290
+ depths (list or tuple): Number of blocks at each stage. Default: [3, 3, 9, 3].
291
+ dims (int): Feature dimension at each stage. Default: [96, 192, 384, 576].
292
+ downsample_layers: (list or tuple): Downsampling layers before each stage.
293
+ drop_path_rate (float): Stochastic depth rate. Default: 0.
294
+ output_norm: norm before classifier head. Default: partial(nn.LayerNorm, eps=1e-6).
295
+ head_fn: classification head. Default: nn.Linear.
296
+ head_dropout (float): dropout for MLP classifier. Default: 0.
297
+ """
298
+
299
+ def __init__(
300
+ self,
301
+ in_chans=3,
302
+ num_classes=1000,
303
+ global_pool='avg',
304
+ depths=(3, 3, 9, 3),
305
+ dims=(96, 192, 384, 576),
306
+ norm_layer=LayerNorm,
307
+ act_layer=nn.GELU,
308
+ conv_ratio=1.0,
309
+ expansion_ratio=8/3,
310
+ kernel_size=7,
311
+ stem_mid_norm=True,
312
+ ls_init_value=None,
313
+ downsample='conv',
314
+ drop_path_rate=0.,
315
+ drop_rate=0.,
316
+ head_fn='default',
317
+ ):
318
+ super().__init__()
319
+ self.num_classes = num_classes
320
+ self.drop_rate = drop_rate
321
+ self.output_fmt = 'NHWC'
322
+ if not isinstance(depths, (list, tuple)):
323
+ depths = [depths] # it means the model has only one stage
324
+ if not isinstance(dims, (list, tuple)):
325
+ dims = [dims]
326
+ act_layer = get_act_layer(act_layer)
327
+
328
+ num_stage = len(depths)
329
+ self.num_stage = num_stage
330
+ self.feature_info = []
331
+
332
+ self.stem = Stem(
333
+ in_chans,
334
+ dims[0],
335
+ mid_norm=stem_mid_norm,
336
+ act_layer=act_layer,
337
+ norm_layer=norm_layer,
338
+ )
339
+ prev_dim = dims[0]
340
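+        # Per-block stochastic depth: rates increase linearly from 0 to drop_path_rate over all
+        # blocks in the network, then are split into one list of rates per stage.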
+        dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
+        cur = 0
+        curr_stride = 4
+        self.stages = nn.Sequential()
+        for i in range(num_stage):
+            dim = dims[i]
+            stride = 2 if curr_stride == 2 or i > 0 else 1
+            curr_stride *= stride
+            stage = MambaOutStage(
+                dim=prev_dim,
+                dim_out=dim,
+                depth=depths[i],
+                kernel_size=kernel_size,
+                conv_ratio=conv_ratio,
+                expansion_ratio=expansion_ratio,
+                downsample=downsample if i > 0 else '',
+                ls_init_value=ls_init_value,
+                norm_layer=norm_layer,
+                act_layer=act_layer,
+                drop_path=dp_rates[i],
+            )
+            self.stages.append(stage)
+            prev_dim = dim
+            # NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
+            self.feature_info += [dict(num_chs=prev_dim, reduction=curr_stride, module=f'stages.{i}')]
+            cur += depths[i]
+
+        if head_fn == 'default':
+            # specific to this model, unusual norm -> pool -> fc -> act -> norm -> fc combo
+            self.head = MlpHead(
+                prev_dim,
+                num_classes,
+                pool_type=global_pool,
+                drop_rate=drop_rate,
+                norm_layer=norm_layer,
+            )
+        else:
+            # more typical norm -> pool -> fc -> act -> fc
+            self.head = ClNormMlpClassifierHead(
+                prev_dim,
+                num_classes,
+                hidden_size=int(prev_dim * 4),
+                pool_type=global_pool,
+                norm_layer=norm_layer,
+                drop_rate=drop_rate,
+            )
+        self.num_features = prev_dim
+        self.head_hidden_size = self.head.num_features
+
+        self.apply(self._init_weights)
+
+    def _init_weights(self, m):
+        if isinstance(m, (nn.Conv2d, nn.Linear)):
+            trunc_normal_(m.weight, std=.02)
+            if m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+
+    @torch.jit.ignore
+    def group_matcher(self, coarse=False):
+        return dict(
+            stem=r'^stem',
+            blocks=r'^stages\.(\d+)' if coarse else [
+                (r'^stages\.(\d+)\.downsample', (0,)),  # blocks
+                (r'^stages\.(\d+)\.blocks\.(\d+)', None),
+            ]
+        )
+
+    @torch.jit.ignore
+    def set_grad_checkpointing(self, enable=True):
+        for s in self.stages:
+            s.grad_checkpointing = enable
+
+    @torch.jit.ignore
+    def get_classifier(self) -> nn.Module:
+        return self.head.fc
+
+    def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
+        self.num_classes = num_classes
+        self.head.reset(num_classes, global_pool)
+
+    def forward_features(self, x):
+        x = self.stem(x)
+        x = self.stages(x)
+        return x
+
+    def forward_head(self, x, pre_logits: bool = False):
+        x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
+        return x
+
+    def forward(self, x):
+        x = self.forward_features(x)
+        x = self.forward_head(x)
+        return x
+
+
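+# Remap checkpoints from the original MambaOut repo to this module's layout: the original
+# 'downsample_layers.*' keys become 'stem.*' / 'stages.*.downsample.*', per-stage block indices
+# move under 'stages.*.blocks.*', and the final norm + MLP head keys move under 'head.*'.
+# Checkpoints already in timm format (containing 'stem.conv1.weight') pass through unchanged.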
+def checkpoint_filter_fn(state_dict, model):
+    if 'model' in state_dict:
+        state_dict = state_dict['model']
+    if 'stem.conv1.weight' in state_dict:
+        return state_dict
+
+    import re
+    out_dict = {}
+    for k, v in state_dict.items():
+        k = k.replace('downsample_layers.0.', 'stem.')
+        k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
+        k = re.sub(r'downsample_layers.([0-9]+)', r'stages.\1.downsample', k)
+        # remap head names
+        if k.startswith('norm.'):
+            # this is moving to head since it's after the pooling
+            k = k.replace('norm.', 'head.norm.')
+        elif k.startswith('head.'):
+            k = k.replace('head.fc1.', 'head.pre_logits.fc.')
+            k = k.replace('head.norm.', 'head.pre_logits.norm.')
+            k = k.replace('head.fc2.', 'head.fc.')
+        out_dict[k] = v
+
+    return out_dict
+
+
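+# Pretrained config defaults: ImageNet-1k classifier, 224x224 train / 288x288 test resolution,
+# crop_pct 1.0, bicubic interpolation; weights are hosted on the Hugging Face hub ('timm/' org).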
+def _cfg(url='', **kwargs):
+    return {
+        'url': url,
+        'num_classes': 1000, 'input_size': (3, 224, 224), 'test_input_size': (3, 288, 288),
+        'pool_size': (7, 7), 'crop_pct': 1.0, 'interpolation': 'bicubic',
+        'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
+        'first_conv': 'stem.conv1', 'classifier': 'head.fc',
+        **kwargs
+    }
+
+
+default_cfgs = generate_default_cfgs({
+    # original weights
+    'mambaout_femto.in1k': _cfg(
+        hf_hub_id='timm/'),
+    'mambaout_kobe.in1k': _cfg(
+        hf_hub_id='timm/'),
+    'mambaout_tiny.in1k': _cfg(
+        hf_hub_id='timm/'),
+    'mambaout_small.in1k': _cfg(
+        hf_hub_id='timm/'),
+    'mambaout_base.in1k': _cfg(
+        hf_hub_id='timm/'),
+
+    # timm experiments below
+    'mambaout_small_rw.sw_e450_in1k': _cfg(
+        hf_hub_id='timm/',
+    ),
+    'mambaout_base_short_rw.sw_e500_in1k': _cfg(
+        hf_hub_id='timm/',
+        crop_pct=0.95, test_crop_pct=1.0,
+    ),
+    'mambaout_base_tall_rw.sw_e500_in1k': _cfg(
+        hf_hub_id='timm/',
+        crop_pct=0.95, test_crop_pct=1.0,
+    ),
+    'mambaout_base_wide_rw.sw_e500_in1k': _cfg(
+        hf_hub_id='timm/',
+        crop_pct=0.95, test_crop_pct=1.0,
+    ),
+    'mambaout_base_plus_rw.sw_e150_in12k_ft_in1k': _cfg(
+        hf_hub_id='timm/',
+    ),
+    'mambaout_base_plus_rw.sw_e150_r384_in12k_ft_in1k': _cfg(
+        hf_hub_id='timm/',
+        input_size=(3, 384, 384), test_input_size=(3, 384, 384), crop_mode='squash', pool_size=(12, 12),
+    ),
+    'mambaout_base_plus_rw.sw_e150_in12k': _cfg(
+        hf_hub_id='timm/',
+        num_classes=11821,
+    ),
+    'test_mambaout': _cfg(input_size=(3, 160, 160), test_input_size=(3, 192, 192), pool_size=(5, 5)),
+})
+
+
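+# Model builder: routes construction through timm's build_model_with_cfg, attaching the
+# checkpoint remap above and a 4-stage feature extraction config (out_indices 0-3).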
+def _create_mambaout(variant, pretrained=False, **kwargs):
+    model = build_model_with_cfg(
+        MambaOut, variant, pretrained,
+        pretrained_filter_fn=checkpoint_filter_fn,
+        feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
+        **kwargs,
+    )
+    return model
+
+
+# a series of MambaOut models
+@register_model
+def mambaout_femto(pretrained=False, **kwargs):
+    model_args = dict(depths=(3, 3, 9, 3), dims=(48, 96, 192, 288))
+    return _create_mambaout('mambaout_femto', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+# Kobe Memorial Version with 24 Gated CNN blocks
+@register_model
+def mambaout_kobe(pretrained=False, **kwargs):
+    model_args = dict(depths=[3, 3, 15, 3], dims=[48, 96, 192, 288])
+    return _create_mambaout('mambaout_kobe', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def mambaout_tiny(pretrained=False, **kwargs):
+    model_args = dict(depths=[3, 3, 9, 3], dims=[96, 192, 384, 576])
+    return _create_mambaout('mambaout_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def mambaout_small(pretrained=False, **kwargs):
+    model_args = dict(depths=[3, 4, 27, 3], dims=[96, 192, 384, 576])
+    return _create_mambaout('mambaout_small', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def mambaout_base(pretrained=False, **kwargs):
+    model_args = dict(depths=[3, 4, 27, 3], dims=[128, 256, 512, 768])
+    return _create_mambaout('mambaout_base', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def mambaout_small_rw(pretrained=False, **kwargs):
+    model_args = dict(
+        depths=[3, 4, 27, 3],
+        dims=[96, 192, 384, 576],
+        stem_mid_norm=False,
+        downsample='conv_nf',
+        ls_init_value=1e-6,
+        head_fn='norm_mlp',
+    )
+    return _create_mambaout('mambaout_small_rw', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def mambaout_base_short_rw(pretrained=False, **kwargs):
+    model_args = dict(
+        depths=(3, 3, 25, 3),
+        dims=(128, 256, 512, 768),
+        expansion_ratio=3.0,
+        conv_ratio=1.25,
+        stem_mid_norm=False,
+        downsample='conv_nf',
+        ls_init_value=1e-6,
+        head_fn='norm_mlp',
+    )
+    return _create_mambaout('mambaout_base_short_rw', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def mambaout_base_tall_rw(pretrained=False, **kwargs):
+    model_args = dict(
+        depths=(3, 4, 30, 3),
+        dims=(128, 256, 512, 768),
+        expansion_ratio=2.5,
+        conv_ratio=1.25,
+        stem_mid_norm=False,
+        downsample='conv_nf',
+        ls_init_value=1e-6,
+        head_fn='norm_mlp',
+    )
+    return _create_mambaout('mambaout_base_tall_rw', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def mambaout_base_wide_rw(pretrained=False, **kwargs):
+    model_args = dict(
+        depths=(3, 4, 27, 3),
+        dims=(128, 256, 512, 768),
+        expansion_ratio=3.0,
+        conv_ratio=1.5,
+        stem_mid_norm=False,
+        downsample='conv_nf',
+        ls_init_value=1e-6,
+        act_layer='silu',
+        head_fn='norm_mlp',
+    )
+    return _create_mambaout('mambaout_base_wide_rw', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def mambaout_base_plus_rw(pretrained=False, **kwargs):
+    model_args = dict(
+        depths=(3, 4, 30, 3),
+        dims=(128, 256, 512, 768),
+        expansion_ratio=3.0,
+        conv_ratio=1.5,
+        stem_mid_norm=False,
+        downsample='conv_nf',
+        ls_init_value=1e-6,
+        act_layer='silu',
+        head_fn='norm_mlp',
+    )
+    return _create_mambaout('mambaout_base_plus_rw', pretrained=pretrained, **dict(model_args, **kwargs))
+
+
+@register_model
+def test_mambaout(pretrained=False, **kwargs):
+    model_args = dict(
+        depths=(1, 1, 3, 1),
+        dims=(16, 32, 48, 64),
+        expansion_ratio=3,
+        stem_mid_norm=False,
+        downsample='conv_nf',
+        ls_init_value=1e-4,
+        act_layer='silu',
+        head_fn='norm_mlp',
+    )
+    return _create_mambaout('test_mambaout', pretrained=pretrained, **dict(model_args, **kwargs))
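+
+
+# Usage sketch (illustrative only, not part of the model definition; assumes timm and torch
+# are installed and this module is registered with timm's model registry):
+#
+#   import timm, torch
+#   model = timm.create_model('mambaout_tiny', pretrained=False).eval()
+#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape [1, 1000]
+#
+#   # Multi-scale features via the feature_cfg declared in _create_mambaout:
+#   feat_model = timm.create_model('mambaout_tiny', features_only=True)
+#   feats = feat_model(torch.randn(1, 3, 224, 224))
+#   # four feature maps at strides 4/8/16/32 with 96/192/384/576 channels for this variant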